Hi, I have implemented a simple file exchange over a client/server connection in C++. It works fine, except for the one problem that it's so damn slow. This is my code:
For sending the file:
int send_file(int fd)
{
    char rec[10];
    struct stat stat_buf;
    fstat(fd, &stat_buf);
    int size = stat_buf.st_size;

    while (size > 0)
    {
        char buffer[1024];
        bzero(buffer, 1024);
        bzero(rec, 10);
        int n;
        if (size >= 1024)
        {
            n = read(fd, buffer, 1024);
            // Send a chunk of data
            n = send(sockFile_, buffer, n, 0);
            // Wait for an acknowledgement
            n = recv(sockFile_, rec, 10, 0);
        }
        else // remaining file bytes
        {
            n = read(fd, buffer, size);
            buffer[size] = '\0';
            send(sockFile_, buffer, n, 0);
            n = recv(sockFile_, rec, 10, 0); // ack
        }
        size -= 1024;
    }

    // Send a completion string
    int n = send(sockFile_, "COMP", strlen("COMP"), 0);
    char buf[10];
    bzero(buf, 10);
    // Receive an acknowledgement
    n = recv(sockFile_, buf, 10, 0);
    return 0;
}
And for receiving the file:
int receive_file(int size, const char* saveName)
{
    std::ofstream outFile(saveName, std::ios::out | std::ios::binary | std::ios::app);
    int n;

    while (size > 0)
    {
        // buffer for storing incoming data
        char buf[1024];
        bzero(buf, 1024);
        if (size >= 1024)
        {
            // receive chunk of data
            n = recv(sockFile_, buf, 1024, 0);
            // write chunk of data to disk
            outFile.write(buf, n);
            // send acknowledgement
            n = send(sockFile_, "OK", strlen("OK"), 0);
        }
        else
        {
            n = recv(sockFile_, buf, size, 0);
            buf[size] = '\0';
            outFile.write(buf, n);
            n = send(sockFile_, "OK", strlen("OK"), 0);
        }
        size -= 1024;
    }
    outFile.close();

    // Receive 'COMP' and send acknowledgement
    char buf[10];
    bzero(buf, 10);
    n = recv(sockFile_, buf, 10, 0);
    n = send(sockFile_, "OK", strlen("OK"), 0);
    std::cout << "File received..." << std::endl;
    return 0;
}
Now here are my initial thoughts: perhaps the buffer is too small, so I should try increasing the block size from 1024 bytes (1 KB) to, say, 65536 bytes (64 KB). But this results in file corruption. OK, so perhaps the code is also being slowed down by the need to receive an acknowledgement after each 1024-byte block of data has been sent, so why not remove the acknowledgements? Unfortunately this results in the blocks not arriving in the correct order and hence file corruption.
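For what it's worth, here is a rough, untested sketch of the looping helpers I think I would need if I did drop the per-block acknowledgements, since (as far as I understand it) recv() and send() on a stream socket may transfer fewer bytes than asked for. The names recv_all/send_all and the standalone sock parameter are just mine for illustration; in my real code the socket is the sockFile_ member:

#include <sys/types.h>
#include <sys/socket.h>
#include <cstddef>

// Receive exactly 'count' bytes into 'buf'; returns false on error or if the peer closes.
static bool recv_all(int sock, char* buf, size_t count)
{
    size_t got = 0;
    while (got < count)
    {
        ssize_t n = recv(sock, buf + got, count - got, 0);
        if (n <= 0)                      // 0 = connection closed, <0 = error
            return false;
        got += static_cast<size_t>(n);
    }
    return true;
}

// Send exactly 'count' bytes from 'buf'; returns false on error.
static bool send_all(int sock, const char* buf, size_t count)
{
    size_t sent = 0;
    while (sent < count)
    {
        ssize_t n = send(sock, buf + sent, count - sent, 0);
        if (n <= 0)
            return false;
        sent += static_cast<size_t>(n);
    }
    return true;
}

Would wrapping every transfer in loops like these let me use 64 KB blocks and skip the "OK" round trips, or am I missing something?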
Perhaps I could split the file into chunks beforehand, create multiple connections, send each chunk over its own threaded connection, and then reassemble the chunks somehow in the receiver...
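If I went down that road, I imagine each worker thread would run something like this rough, untested sketch, with its own connected socket and a fixed byte range of the file (it reuses the hypothetical send_all helper from the sketch above, and pread() so that each thread reads at its own offset without sharing a file position):

#include <unistd.h>      // pread
#include <sys/types.h>
#include <cstddef>

// Send 'length' bytes of file 'fd', starting at 'offset', over this worker's socket.
static bool send_chunk(int sock, int fd, off_t offset, size_t length)
{
    char buffer[64 * 1024];
    while (length > 0)
    {
        size_t want = length < sizeof(buffer) ? length : sizeof(buffer);
        ssize_t n = pread(fd, buffer, want, offset);   // read at an absolute offset
        if (n <= 0)
            return false;
        if (!send_all(sock, buffer, static_cast<size_t>(n)))
            return false;
        offset += n;
        length -= static_cast<size_t>(n);
    }
    return true;
}

The receiver would then have to write each chunk back at the matching offset, which is where it starts to feel over-complicated.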
Any idea how I could make the file transfer process more efficient (faster)?
Thanks, Ben.