That's all part of the UTF8 encoding (which is only one encoding scheme for Unicode).
The size can be figured out by examining the first byte as follows (a minimal code sketch of this check follows the list):
- if it starts with bit pattern "10" (0x80-0xbf), it's not the first byte of a sequence and you should back up until you find the start, any byte that starts with "0" or "11" (thanks to Jeffrey Hantin for pointing that out in the comments).
- if it starts with bit pattern "0" (0x00-0x7f), it's 1 byte.
- if it starts with bit pattern "110" (0xc0-0xdf), it's 2 bytes.
- if it starts with bit pattern "1110" (0xe0-0xef), it's 3 bytes.
- if it starts with bit pattern "11110" (0xf0-0xf7), it's 4 bytes.
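In code, that check is just a cascade of comparisons on the lead byte. This little sketch isn't part of the program further down (the name utf8SeqLen is only illustrative), but it captures the list above:

static int utf8SeqLen (unsigned char lead) {
    if (lead <= 0x7f) return 1;   /* 0xxxxxxx: 1-byte sequence */
    if (lead <= 0xbf) return 0;   /* 10xxxxxx: continuation, back up */
    if (lead <= 0xdf) return 2;   /* 110xxxxx: 2-byte sequence */
    if (lead <= 0xef) return 3;   /* 1110xxxx: 3-byte sequence */
    if (lead <= 0xf7) return 4;   /* 11110xxx: 4-byte sequence */
    return -1;                    /* 0xf8-0xff: never a valid lead byte */
}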
I'll duplicate the table showing this, but the original is on the Wikipedia UTF8 page.
+----------------+----------+----------+----------+----------+
| Unicode        | Byte 1   | Byte 2   | Byte 3   | Byte 4   |
+----------------+----------+----------+----------+----------+
| U+0000-007F    | 0xxxxxxx |          |          |          |
| U+0080-07FF    | 110yyyxx | 10xxxxxx |          |          |
| U+0800-FFFF    | 1110yyyy | 10yyyyxx | 10xxxxxx |          |
| U+10000-10FFFF | 11110zzz | 10zzyyyy | 10yyyyxx | 10xxxxxx |
+----------------+----------+----------+----------+----------+
The Unicode characters in the above table are constructed from the bits:

000z-zzzz yyyy-yyyy xxxx-xxxx

where the z and y bits are assumed to be zero where they're not given. Some bytes are considered illegal as a start byte since they're either:
- useless (overlong): a 2-byte sequence starting with 0xc0 or 0xc1 actually gives a code point less than 0x80, which can be represented more compactly with a 1-byte sequence.
- disallowed by RFC3629: the bytes 0xf5 through 0xfd would start a 4-byte sequence above U+10FFFF, or a 5-byte or 6-byte sequence, none of which RFC3629 permits.
- just unused: bytes 0xfe and 0xff.
In addition, subsequent bytes in a multi-byte sequence that don't begin with the bits "10" are also illegal.
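The table above also tells you everything you need for the encoding direction. This isn't the encoding code I removed from the program below, just a rough sketch of the bit packing for illustration (the name putUtf8 and the buffer convention are arbitrary); note it doesn't check for surrogates:

/* Sketch: encode one code point (<= 0x10FFFF) into buf, returning the */
/* number of bytes written, or -1 if the code point is out of range.   */
static int putUtf8 (unsigned int cp, unsigned char *buf) {
    if (cp <= 0x7f) {                       /* 0xxxxxxx */
        buf[0] = (unsigned char) cp;
        return 1;
    }
    if (cp <= 0x7ff) {                      /* 110yyyxx 10xxxxxx */
        buf[0] = (unsigned char) (0xc0 | (cp >> 6));
        buf[1] = (unsigned char) (0x80 | (cp & 0x3f));
        return 2;
    }
    if (cp <= 0xffff) {                     /* 1110yyyy 10yyyyxx 10xxxxxx */
        buf[0] = (unsigned char) (0xe0 | (cp >> 12));
        buf[1] = (unsigned char) (0x80 | ((cp >> 6) & 0x3f));
        buf[2] = (unsigned char) (0x80 | (cp & 0x3f));
        return 3;
    }
    if (cp <= 0x10ffff) {                   /* 11110zzz 10zzyyyy 10yyyyxx 10xxxxxx */
        buf[0] = (unsigned char) (0xf0 | (cp >> 18));
        buf[1] = (unsigned char) (0x80 | ((cp >> 12) & 0x3f));
        buf[2] = (unsigned char) (0x80 | ((cp >> 6) & 0x3f));
        buf[3] = (unsigned char) (0x80 | (cp & 0x3f));
        return 4;
    }
    return -1;                              /* beyond U+10FFFF */
}

Feeding U+6FB3 through that packing gives back the bytes 0xe6 0xbe 0xb3 from the question.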
As an example, consider the sequence [0xf4,0x8a,0xaf,0x8d]. This is a 4-byte sequence as the first byte falls between 0xf0 and 0xf7.
    0xf4     0x8a     0xaf     0x8d
= 11110100 10001010 10101111 10001101
       zzz   zzyyyy   yyyyxx   xxxxxx
= 1 0000 1010 1011 1100 1101
  z zzzz yyyy yyyy xxxx xxxx
= U+10ABCD
For your specific query with the first byte 0xe6 (length = 3), the byte sequence is:
    0xe6     0xbe     0xb3
= 11100110 10111110 10110011
      yyyy   yyyyxx   xxxxxx
= 01101111 10110011
  yyyyyyyy xxxxxxxx
= U+6FB3
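The same arithmetic can be written as C expressions using the masks and shifts you'll see in the decoder further down; this is just a spot check of the two examples above:

#include <stdio.h>

int main (void) {
    /* 4-byte example: 0xf4 0x8a 0xaf 0x8d */
    unsigned int cp4 = ((0xf4 & 0x07u) << 18)
                     | ((0x8a & 0x3fu) << 12)
                     | ((0xaf & 0x3fu) << 6)
                     |  (0x8d & 0x3fu);

    /* 3-byte example: 0xe6 0xbe 0xb3 */
    unsigned int cp3 = ((0xe6 & 0x0fu) << 12)
                     | ((0xbe & 0x3fu) << 6)
                     |  (0xb3 & 0x3fu);

    printf ("U+%X U+%X\n", cp4, cp3);   /* prints U+10ABCD U+6FB3 */
    return 0;
}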
If you look that code point up, you'll see it's the one you had in your question: 澳.
To show how the decoding works, I went back to my archives to find my UTF8 handling code. I've had to morph it a bit to make it a complete program and the encoding has been removed (since the question was really about decoding), so I hope I haven't introduced any errors from the cut and paste:
#include <stdio.h>
#include <string.h>
#define UTF8ERR_TOOSHORT -1
#define UTF8ERR_BADSTART -2
#define UTF8ERR_BADSUBSQ -3
typedef unsigned char uchar;
/* Decode one UTF-8 sequence starting at pBytes. On entry, *pLen holds the
** number of bytes available; on success it's set to the number consumed.
** Returns the code point, or a negative UTF8ERR_* value on error. */
static int getUtf8 (uchar *pBytes, int *pLen) {
    if (*pLen < 1) return UTF8ERR_TOOSHORT;

    /* 1-byte sequence: 0xxxxxxx */
    if (pBytes[0] <= 0x7f) {
        *pLen = 1;
        return pBytes[0];
    }

    /* Subsequent byte marker: 10xxxxxx can't start a sequence */
    if (pBytes[0] <= 0xbf) return UTF8ERR_BADSTART;

    /* 2-byte sequence: 110yyyxx 10xxxxxx (0xc0/0xc1 would be overlong) */
    if ((pBytes[0] == 0xc0) || (pBytes[0] == 0xc1)) return UTF8ERR_BADSTART;
    if (pBytes[0] <= 0xdf) {
        if (*pLen < 2) return UTF8ERR_TOOSHORT;
        if ((pBytes[1] & 0xc0) != 0x80) return UTF8ERR_BADSUBSQ;
        *pLen = 2;
        return ((int)(pBytes[0] & 0x1f) << 6)
             |       (pBytes[1] & 0x3f);
    }

    /* 3-byte sequence: 1110yyyy 10yyyyxx 10xxxxxx */
    if (pBytes[0] <= 0xef) {
        if (*pLen < 3) return UTF8ERR_TOOSHORT;
        if ((pBytes[1] & 0xc0) != 0x80) return UTF8ERR_BADSUBSQ;
        if ((pBytes[2] & 0xc0) != 0x80) return UTF8ERR_BADSUBSQ;
        *pLen = 3;
        return ((int)(pBytes[0] & 0x0f) << 12)
             | ((int)(pBytes[1] & 0x3f) << 6)
             |       (pBytes[2] & 0x3f);
    }

    /* 4-byte sequence: 11110zzz 10zzyyyy 10yyyyxx 10xxxxxx
    ** (only three bits of the lead byte are significant, hence 0x07) */
    if (pBytes[0] <= 0xf4) {
        if (*pLen < 4) return UTF8ERR_TOOSHORT;
        if ((pBytes[1] & 0xc0) != 0x80) return UTF8ERR_BADSUBSQ;
        if ((pBytes[2] & 0xc0) != 0x80) return UTF8ERR_BADSUBSQ;
        if ((pBytes[3] & 0xc0) != 0x80) return UTF8ERR_BADSUBSQ;
        *pLen = 4;
        return ((int)(pBytes[0] & 0x07) << 18)
             | ((int)(pBytes[1] & 0x3f) << 12)
             | ((int)(pBytes[2] & 0x3f) << 6)
             |       (pBytes[3] & 0x3f);
    }

    /* 0xf5 and above can never start a valid sequence */
    return UTF8ERR_BADSTART;
}
/* Convert a lowercase hex string (no prefix) to a single byte value.
** Returns 0 for anything it doesn't understand. */
static uchar htoc (char *h) {
    uchar u = 0;
    while (*h != '\0') {
        if ((*h >= '0') && (*h <= '9'))
            u = ((u & 0x0f) << 4) + *h - '0';
        else if ((*h >= 'a') && (*h <= 'f'))
            u = ((u & 0x0f) << 4) + *h + 10 - 'a';
        else
            return 0;
        h++;
    }
    return u;
}
int main (int argCount, char *argVar[]) {
    int i;
    uchar utf8[4];
    int len = argCount - 1;

    if (len != 4) {
        printf ("Usage: utf8 <hex1> <hex2> <hex3> <hex4>\n");
        return 1;
    }

    printf ("Input: (%d) %s %s %s %s\n",
        len, argVar[1], argVar[2], argVar[3], argVar[4]);

    /* Convert the four hex arguments into raw bytes. */
    for (i = 0; i < 4; i++)
        utf8[i] = htoc (argVar[i+1]);
    printf (" Becomes: (%d) %02x %02x %02x %02x\n",
        len, utf8[0], utf8[1], utf8[2], utf8[3]);

    /* Decode and report either the code point or the error code. */
    if ((i = getUtf8 (&(utf8[0]), &len)) < 0)
        printf ("Error %d\n", i);
    else
        printf (" Finally: U+%x, with length of %d\n", i, len);

    return 0;
}
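Assuming you've saved it as utf8.c (the file and executable names are just my choices), it can be built with gcc or any other C compiler along the lines of:

gcc -o utf8 utf8.c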
You can then run it with your sequence of bytes (you'll need four arguments, so use 0 to pad them out) as follows:
> utf8 f4 8a af 8d
Input: (4) f4 8a af 8d
 Becomes: (4) f4 8a af 8d
 Finally: U+10abcd, with length of 4

> utf8 e6 be b3 0
Input: (4) e6 be b3 0
 Becomes: (4) e6 be b3 00
 Finally: U+6fb3, with length of 3

> utf8 41 0 0 0
Input: (4) 41 0 0 0
 Becomes: (4) 41 00 00 00
 Finally: U+41, with length of 1

> utf8 87 0 0 0
Input: (4) 87 0 0 0
 Becomes: (4) 87 00 00 00
Error -2

> utf8 f4 8a af ff
Input: (4) f4 8a af ff
 Becomes: (4) f4 8a af ff
Error -3

> utf8 c4 80 0 0
Input: (4) c4 80 0 0
 Becomes: (4) c4 80 00 00
 Finally: U+100, with length of 2