I have a program that does a block nested loop join. Basically, it reads the contents of one file (say a 10 GB file) into buffer1 (say 400 MB) and puts them into a hash table. It then reads the contents of a second file (say another 10 GB file) into buffer2 (say 100 MB) and checks whether the elements in buffer2 are present in the hash table. (With these sizes the outer loop runs ceil(10 GB / 400 MB) = 26 times, and each pass rescans the entire second file.) Outputting the result doesn't matter; for now I'm only concerned with the program's efficiency. I need to read 8 bytes at a time from both files, so I use long long int. The problem is that my program is very inefficient. How can I make it more efficient?
// I compile using g++ -o hash hash.c -std=c++0x
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/time.h>
#include <stdint.h>
#include <math.h>
#include <limits.h>
#include <iostream>
#include <algorithm>
#include <vector>
#include <unordered_map>
using namespace std;
typedef std::unordered_map<unsigned long long int, unsigned long long int> Mymap;
int main()
{
    uint64_t block_size1 = (400*1024*1024)/sizeof(long long int); // number of 8-byte elements in the 400 MB block for table A
    uint64_t block_size2 = (100*1024*1024)/sizeof(long long int); // number of 8-byte elements in the 100 MB block for table B
    int i = 0, k = 0;
    uint64_t x, z, l = 0;
    size_t n1, n2; // number of elements actually read by each fread
    unsigned long long int *buffer1 = (unsigned long long int *)malloc(block_size1 * sizeof(long long int));
    unsigned long long int *buffer2 = (unsigned long long int *)malloc(block_size2 * sizeof(long long int));
    Mymap c1; // hash table
    //Mymap::iterator it;
    FILE *file1 = fopen64("10G1.bin","rb"); // input is a 10 GB binary file
    FILE *file2 = fopen64("10G2.bin","rb");
    printf("size of buffer1 : %llu \n", (unsigned long long)(block_size1 * sizeof(long long int)));
    printf("size of buffer2 : %llu \n", (unsigned long long)(block_size2 * sizeof(long long int)));
    while ((n1 = fread(buffer1, sizeof(long long int), block_size1, file1)) > 0)
    {
        k++;
        printf("Iterations completed : %d \n", k);
        for (x = 0; x < n1; x++)
            c1.insert(Mymap::value_type(buffer1[x], x)); // insert the block's values into the hash table
        // std::cout << "The size of the hash table is " << c1.size() * sizeof(Mymap::value_type) << "\n" << endl;
        /* // display the contents of the hash table
        for (Mymap::const_iterator it = c1.begin(); it != c1.end(); ++it)
            std::cout << " [" << it->first << ", " << it->second << "]";
        std::cout << std::endl;
        */
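        // Probe phase: stream table B through buffer2 and look each element up.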
        while ((n2 = fread(buffer2, sizeof(long long int), block_size2, file2)) > 0)
        {
            i++; // count the number of inner iterations
            // printf("%d\n", i);
            for (z = 0; z < n2; z++)
                c1.find(buffer2[z]); // find the element in the hash table
            // if (c1.find(buffer2[z]) != c1.end()) // to check the correctness of the code
            //     l++;
            // printf("The number of equal elements is : %llu\n", l); // if the input files have exactly the same contents, "l" should equal n2
            // l = 0;
        }
        rewind(file2);
        c1.clear(); // clear the hash table before loading the next outer block
    }
    free(buffer1);
    free(buffer2);
    fclose(file1);
    fclose(file2);
}
Update:
Is it possible to read a chunk (say 400 MB) directly from a file and put it straight into the hash table using C++ stream readers? I think that could further reduce the overhead.
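For reference, here is a minimal, untested sketch of that idea; the names chunk, table, and got are only illustrative, and it assumes the file is raw 8-byte integers as above:

// Sketch: read one 400 MB chunk with an ifstream, then bulk-insert it.
#include <fstream>
#include <vector>
#include <unordered_map>
#include <utility>
#include <stdint.h>
#include <stdlib.h>

int main()
{
    const size_t chunk_elems = (400ULL * 1024 * 1024) / sizeof(uint64_t);
    std::vector<uint64_t> chunk(chunk_elems);
    std::unordered_map<uint64_t, uint64_t> table;
    table.rehash(chunk_elems); // pre-size the bucket array to avoid rehashing during the bulk insert

    std::ifstream in("10G1.bin", std::ios::binary);
    in.read(reinterpret_cast<char *>(&chunk[0]), static_cast<std::streamsize>(chunk.size() * sizeof(uint64_t)));
    size_t got = in.gcount() / sizeof(uint64_t); // elements actually read (the last chunk may be short)

    for (size_t x = 0; x < got; ++x)
        table.insert(std::make_pair(chunk[x], x)); // same key/position pairs as before
    return 0;
}

I used std::vector instead of malloc so the buffer is sized and freed automatically, but I don't know whether ifstream::read is actually any faster than fread here.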