I have a protocol that requires a length field of up to 32 bits, which must be generated at runtime to describe how many bytes are in a given packet.
The code below is kind of ugly, and I am wondering if it can be refactored to be slightly more efficient or easier to understand. The requirement is that the code only generates as many bytes as are needed to describe the length of the packet: lengths up to 255 take 1 byte of length, lengths up to 65535 take 2 bytes, and so on. For example, a length of 300 (0x012C) would be written as the two bytes 0x01 0x2C.
{
extern char byte_stream[];
int bytes = offset_in_packet;
unsigned int n = length_of_packet; /* Under 4 billion, so this can be represented in 32 bits; unsigned so the top byte causes no sign trouble. */
unsigned int t; /* 32-bit value used for temporary storage. */
int write_zeros = 0; /* Set once a more significant byte has been written. */
/* These are the bytes we will break up n into. */
unsigned char first, second, third, fourth;
t = n & 0xFF000000;
/* AND isolates the most significant byte of n: the only bits that can be set in t are the top 8. */
first = t >> 24;
/* Shifting right by 24 brings that byte down into the range 0-255. This is the first, highest byte of n. */
if (t) {
printf("byte 1: 0x%02x\n",first );
byte_stream[bytes] = first; bytes++;
write_zeros = 1;
}
/* Repeat the same mask-and-shift for the second-highest byte. */
t = n & 0x00FF0000;
second = t >> 16;
if (t || write_zeros) {
printf("byte 2: 0x%02x\n", second );
byte_stream[bytes] = second; bytes++;
write_zeros = 1;
}
t = n & 0x0000FF00;
third = t >> 8;
if ( t || write_zeros) {
printf("byte 3: 0x%02x\n", third );
byte_stream[bytes] = third; bytes++;
write_zeros = 1;
}
t = n & 0x000000FF;
fourth = t;
/* The lowest byte is always written, so even n == 0 produces one byte of length. */
printf("byte 4: 0x%02x\n", fourth);
byte_stream[bytes] = fourth; bytes++;
}
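For what it's worth, here is a minimal sketch of how the same logic could be folded into a loop. It assumes the same byte_stream array and the offset/length values used above, plus <stdio.h> for the printf calls; the function name write_length is just for illustration, and an unsigned type is used for the length so the top byte never runs into sign issues. Treat it as an illustration rather than a drop-in replacement.

    #include <stdio.h>

    extern char byte_stream[];

    /* Writes n into byte_stream starting at offset, most significant byte
       first, using only as many bytes as needed (1 to 4). Returns the new
       offset. Sketch only; the names are placeholders. */
    static int write_length(int offset, unsigned long n)
    {
        int shift;
        int started = 0; /* becomes 1 once the first byte has been written */

        for (shift = 24; shift >= 0; shift -= 8) {
            unsigned char byte = (n >> shift) & 0xFF;
            if (byte || started || shift == 0) {
                printf("byte: 0x%02x\n", byte);
                byte_stream[offset++] = byte;
                started = 1;
            }
        }
        return offset;
    }

Called as bytes = write_length(offset_in_packet, length_of_packet); it writes the same bytes as the unrolled version above, and the shift == 0 test is what guarantees at least one length byte even for a zero length.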