//mimic a read for which PE1 is under kappa_prime and PE2 too. Such a read is expected to be removed by low filter
T_read_numericValuesread_values3;
for(i=1;i<=200;i++){
read_values3.single_or_PE_val.push_back(i);
}
read_values3.idx_start_PE2=201;
for(i=1;i<=205;i++){
read_values3.single_or_PE_val.push_back(i);
}
ret=cms->addRead(read_values3);
assert(ret>0);
ret=cms->isBeneathMinKappa(read_values3);
assert(ret>0);
//mimic a read for which PE1 is over kappa_prime but not PE2
T_read_numericValuesread_values4;
for(i=201;i<=399;i++){
read_values4.single_or_PE_val.push_back(200);
}
read_values4.idx_start_PE2=199;
for(i=201;i<=399;i++){
read_values4.single_or_PE_val.push_back(i);
}
ret=cms->addRead(read_values4);
assert(ret>0);
ret=cms->isBeneathMinKappa(read_values4);
assert(ret>0);
//mimic a read for which PE2 is over kappa_prime but not PE1
T_read_numericValuesread_values5;
for(i=201;i<=399;i++){
read_values5.single_or_PE_val.push_back(i);
}
read_values5.idx_start_PE2=199;
for(i=201;i<=399;i++){
read_values5.single_or_PE_val.push_back(200);
}
ret=cms->addRead(read_values5);
assert(ret>0);
ret=cms->isBeneathMinKappa(read_values5);
assert(ret>0);
deletecms;
}
/*
* TODO: move this somewhere else.
* I use this test because the median calculation is where we spend the most time (approx. 50%). I want to know which part of it takes the longest, to see if I can improve it.
...
...
@@ -243,8 +358,10 @@ int main(int argc, char **argv) {
cout<<"testing CMS with lambda="<<lambda<<endl;
test_CMS(lambda,kappa,kappa_prime);// Finally using C arrays (maps implied storing hash keys : 4 Bytes per k_mer overhead) but each array is of size INT_MAX...
cout<<"testing CMS with PE not as single"<<endl;
cout<<"testing CMS with PE not as single strict kappa prime implementation"<<endl;