I am getting a debug assertion error (_BLOCK_TYPE_IS_VALID(pHead->nBlockUse)) while trying to do the following:

#include <vector>
#include <algorithm>
using namespace std;

class NN
{
public:
    NN(const int numLayers,const int *lSz,const int AFT,const int OAF,const double initWtMag,const int UEW,const double *extInitWt);
    double sse;
    bool operator < (const NN &net) const {return sse < net.sse;}
};

class Pop
{
    int popSize;
    double a;
public:

    Pop(const int numLayers,const int *lSz,const int AFT,const int OAF,const double initWtMag,const int numNets,const double alpha);
    ~Pop();
    vector<NN> nets;
    void GA(...);
};

Pop::Pop(const int numLayers,const int *lSz,const int AFT,const int OAF,
      const double initWtMag,const int numNets,const double alpha)
{
    popSize=numNets;
    a=alpha;
    nets.reserve(popSize);
    for(int i=0;i<popSize;i++)
    {
        NN *net = new NN (numLayers,lSz,AFT,OAF,initWtMag,0,0);
        nets.push_back(*net);
    }
}

void Pop::GA()
{
...
     sort(nets.begin(),nets.end());
...
}

The error appears to be related to the sort function. I checked all instances of the nets vector and they seem to be OK, each having a different sse. The funny thing is that I created a simpler case of the above code (see below) and it worked without any errors. I am racking my brain. Please help.

#include <iostream>
#include <string>
#include <vector>
#include <algorithm>
using namespace std;

class Student
{
public:
    string name;
    double grade;
    Student(string,double);
    bool operator < (const Student &st) const {return grade < st.grade;}
};

Student::Student(string stName,double stGrade)
{
    name = stName;
    grade = stGrade;
}

int main()
{
    vector<Student> group;
    Student *st;
    st = new Student("Bill",3.5);
    group.push_back(*st);
    st = new Student("John",3.9);
    group.push_back(*st);
    st = new Student("Dave",3.1);
    group.push_back(*st);
    sort(group.begin(),group.end());
    for each(Student st in group)    // MSVC-specific 'for each' extension
        cout << st.name << " " << st.grade << endl;
    cin.get();
    return(0);
}
+2  A: 

The _BLOCK_TYPE_IS_VALID assertion fires when you overwrite the header of a block allocated by new. This happens when you slice objects, use dead objects, etc.
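For illustration, here is a minimal program (not from the question) that typically trips this assertion in an MSVC debug build, via the "dead object" route:

int main()
{
    int *p = new int(42);
    delete p;   // the CRT debug heap marks this block as freed
    delete p;   // deleting it again inspects a dead block header, which
                // typically fires _BLOCK_TYPE_IS_VALID in a debug build
    return 0;
}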

You should have a look at your complete code and try to work from the data you have in your debugger. This short code snippet contains several 'curious' usages of C++, but no obvious point at which it produces the described error (at least for me).

Christopher
A: 

Thanks, everybody. First, I clear the memory allocated for the nets vector inside the Pop destructor:

Pop::~Pop()
{
    //nets.clear();
    nets.~vector<NN>();
}
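A note on this destructor: explicitly invoking nets.~vector<NN>() is itself a likely source of this assertion, because the compiler destroys the nets member again when ~Pop finishes, so every element is destructed twice. A minimal sketch of the conventional form:

Pop::~Pop()
{
    // nothing needed here: the nets member (and each NN in it)
    // is destroyed automatically, exactly once
}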

The error message does not say much, and I would appreciate it if somebody could show me how to make MSVC 2008 display more detailed info. Here is what it says (I can't cut and paste it for some reason, so I am retyping it):

Debug Assertion Failed!
Program: ... GANN.exe
File: ... dbgdel.cpp
Line: 52
Expression: _BLOCK_TYPE_IS_VALID(pHead->nBlockUse)
For information on how ...

When I press Debug, the debugger shows me line 52 of the file dbgdel.cpp:

_ASSERTE(_BLOCK_TYPE_IS_VALID(pHead->nBlockUse));

inside

void operator delete(void *pUserData)
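As an aside on getting more detailed info: the CRT debug heap can validate every block on each allocation and free, which usually reports corruption near the write that caused it rather than at a later delete. A minimal sketch using the documented _CrtSetDbgFlag API from <crtdbg.h>:

#include <crtdbg.h>

int main()
{
    // check the whole debug heap on every allocation and free, so
    // corruption is reported close to the code that caused it
    _CrtSetDbgFlag(_CRTDBG_ALLOC_MEM_DF | _CRTDBG_CHECK_ALWAYS_DF);

    // ... run the program as usual ...
    return 0;
}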

Here is more of my code showing what happens before I try to sort:

double Pop::GA(...)
{
    for (int gen=0;gen<ngen;gen++)
    {
        int istart=0;
        if(gen>0) istart=eliteSize;
        for(int i=istart;i<popSize;i++)
            nets[i].getSSE(in,tgt,ntr,discount);

        for(int i=istart;i<popSize;i++)
        {
            cout << i << " " << nets[i].sse << endl;
        }

        sort(nets.begin(),nets.end());

Everything works properly up to the sort() call. The lSz pointer is used inside NN to hold the number of nodes in each layer of the neural network, for example lSz[3]={12,5,1} (12 inputs, one hidden layer with 5 neurons, and one output). It is used to create a 3D array of the weights for each connection of the network. Each of the 100 NN instances inside the population has its own weight array, but they all share the same lSz[] and other structural parameters, which unfortunately get copied from one NN instance to another. I wanted to declare these shared class members static, but that would prevent parallelization.
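If NN also defines a destructor that deletes ls, out and w (not shown in the question), that copying is exactly what breaks: the compiler-generated copy constructor used by push_back and by sort copies only the raw pointers, so two NN objects end up owning, and later deleting, the same blocks. A hedged sketch of a deep copy, assuming the member names used by the constructor below:

// Sketch: deep copy for NN, assuming members nl, ls, aft, oaf, binMid,
// out and w as used by the constructor shown further down, plus a
// destructor that deletes them.
NN::NN(const NN &other)
{
    sse=other.sse;
    nl=other.nl;
    aft=other.aft;
    oaf=other.oaf;
    binMid=other.binMid;

    ls=new int[nl];
    for(int i=0;i<nl;i++) ls[i]=other.ls[i];

    out=new double*[nl];
    for(int i=0;i<nl;i++)
    {
        out[i]=new double[ls[i]];
        for(int j=0;j<ls[i];j++) out[i][j]=other.out[i][j];
    }

    w=new double**[nl];                 // w[0] stays unused, as in the constructor
    for(int i=1;i<nl;i++)
    {
        w[i]=new double*[ls[i]];
        for(int j=0;j<ls[i];j++)
        {
            w[i][j]=new double[ls[i-1]+1];
            for(int k=0;k<=ls[i-1];k++) w[i][j][k]=other.w[i][j][k];
        }
    }
}

A matching copy assignment operator (or making assignment private) would be needed as well, per the rule of three.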

A: 

I just discovered that if I do the Pop construction like this:

Pop::Pop(const int numLayers,const int *lSz,const int AFT,const int OAF,
      const double initWtMag,const int numNets,const double alpha)
{
    popSize=numNets;
    a=alpha;
    cout << "defined a\n";
    nets.reserve(popSize);
    NN *net = new NN (numLayers,lSz,AFT,OAF,initWtMag,0,0);
    for(int i=0;i<popSize;i++)
    {
        //NN *net = new NN (numLayers,lSz,AFT,OAF,initWtMag,0,0);
        nets.push_back(*net);
    }
}

Then everything works, including sort(). But that does not work for me, because now the nets vector contains popSize copies of a single NN instance. The idea was to initialize each of these instances individually. Each instance of NN is supposed to have its own array of weights, randomly initialized inside the NN constructor:

NN::NN(const int numLayers,const int *lSz,const int AFT,const int OAF,const double initWtMag,
       const int UEW,const double *extInitWt)
{
//  set number of layers and their sizes
    nl=numLayers;
    ls=new int[nl];
    for(int i=0;i<nl;i++) ls[i]=lSz[i];

//  set other parameters
    aft=AFT;
    oaf=OAF;
    binMid=0.0;
    if(aft==0) binMid=0.5;

//  allocate memory for output of each neuron
    out = new double*[nl];
    for(int i=0;i<nl;i++) out[i]=new double[ls[i]];

//  allocate memory for weights (genes)
//  w[lr #][neuron # in this lr][input # = neuron # in prev lr]
    w = new double**[nl];
    for(int i=1;i<nl;i++) w[i]=new double*[ls[i]];
    for(int i=1;i<nl;i++)            // for each layer except input
        for(int j=0;j<ls[i];j++)     // for each neuron in current layer
            w[i][j]=new double[ls[i-1]+1]; // last element, w[i][j][ls[i-1]], is the bias

//  seed and assign random weights (genes)
    SYSTEMTIME tStart,tCurr;
    GetSystemTime(&tStart);
    for(;;)
    {
        GetSystemTime(&tCurr);
        if(tCurr.wMilliseconds!=tStart.wMilliseconds) break;
    }
    srand(tCurr.wMilliseconds);
    int iw=0;
    for(int i=1;i<nl;i++)               // for each layer except input
        for(int j=0;j<ls[i];j++)        // for each neuron in current layer
            for(int k=0;k<=ls[i-1];k++) // for each input of curr neuron incl bias
                if(UEW==0) w[i][j][k]=initWtMag*2.0*(rand()/(double)RAND_MAX-0.5);
                else w[i][j][k]=extInitWt[iw++];
}
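For reference, here are the headers and member declarations this constructor appears to assume; this is reconstructed from the code above and may differ from the actual class:

#include <windows.h>   // SYSTEMTIME, GetSystemTime
#include <cstdlib>     // srand, rand, RAND_MAX

class NN
{
public:
    NN(const int numLayers,const int *lSz,const int AFT,const int OAF,
       const double initWtMag,const int UEW,const double *extInitWt);
    double sse;
    bool operator < (const NN &net) const {return sse < net.sse;}
private:
    int nl;          // number of layers
    int *ls;         // nodes per layer
    int aft,oaf;     // activation function selectors
    double binMid;   // midpoint of the binary activation range
    double **out;    // out[layer][neuron]: neuron outputs
    double ***w;     // w[layer][neuron][input]: weights, last input is bias
};

As a side note on the seeding loop: calling srand inside every constructor (after busy-waiting for the millisecond to change) costs up to a millisecond per network; seeding once at program start-up would still give each network different weights, since rand() continues the same stream.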