I am trying to limit my application's send rate to 900 kbps, but the problem is that the protocol I use is message oriented and the messages have very different sizes. Messages can be anywhere from 40 bytes up to 125000 bytes, and every message is sent as an atomic unit.
I tried implementing a token bucket, but if I set a small bucket size the big messages never get sent, and a bucket large enough for them produces a large burst with effectively no rate limiting at all.
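To put numbers on it: 900 kbps is about 112500 bytes per second, so a 125000-byte message needs slightly more than a full second's worth of tokens, and a bucket big enough to ever hold that many tokens can also release more than a second of traffic in a single burst.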
This is my small implementation in C:
#include <stdint.h>
#include <stdio.h>
#include <sys/time.h>

typedef struct token_buffer {
    size_t capacity;    // bucket size in bytes
    size_t tokens;      // tokens currently available, in bytes
    double rate;        // refill rate in bytes per millisecond
    uint64_t timestamp; // time of the last refill, in milliseconds
} token_buffer;
static uint64_t time_now()
{
    struct timeval ts;
    gettimeofday(&ts, NULL);
    // Milliseconds since the epoch; cast before multiplying to avoid overflow.
    return (uint64_t)ts.tv_sec * 1000 + ts.tv_usec / 1000;
}
static int token_buffer_init(token_buffer *tbf, size_t max_burst, double rate)
{
    tbf->capacity = max_burst;
    tbf->tokens = max_burst;
    tbf->rate = rate;
    tbf->timestamp = time_now();
    return 0;
}
static int token_buffer_consume(token_buffer *tbf, size_t bytes)
{
    // Refill the bucket based on the time elapsed since the last call.
    uint64_t now = time_now();
    size_t delta = (size_t)(tbf->rate * (now - tbf->timestamp));
    tbf->tokens = (tbf->capacity < tbf->tokens + delta) ? tbf->capacity : tbf->tokens + delta;
    tbf->timestamp = now;
    fprintf(stdout, "TOKENS %zu bytes: %zu\n", tbf->tokens, bytes);
    if (bytes <= tbf->tokens) {
        tbf->tokens -= bytes;
    } else {
        return -1;
    }
    return 0;
}
Then somewhere in main():
while (1) {
    len = read_msg(&msg, file);
    // Busy-wait until we have enough tokens.
    // If len is larger than the bucket capacity this loop never ends;
    // if the capacity is large enough for the biggest message, hardly any
    // rate limiting happens.
    while (token_buffer_consume(&tbf, len) != 0) {}
    send_to_net(&msg, len);
}