Fixing things up (bad commits?!)
[mldemos:baraks-mldemos.git] / _AlgorithmsPlugins / OpenCV / classifierBoost.cpp
/*********************************************************************
MLDemos: A User-Friendly visualization toolkit for machine learning
Copyright (C) 2010  Basilio Noris
Contact: mldemos@b4silio.com

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Library General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*********************************************************************/
#include "public.h"
#include "basicMath.h"
#include "classifierBoost.h"
#include <QDebug>
#include <cstdio> // sprintf, used in GetInfoString()

using namespace std;
ClassifierBoost::ClassifierBoost()
    : model(0), weakCount(0), scoreMultiplier(1.f), boostType(CvBoost::GENTLE)
{
    bSingleClass = false;
}

ClassifierBoost::~ClassifierBoost()
{
    if(model) model->clear();
    DEL(model);
}

vector<fvec> ClassifierBoost::learners;
int ClassifierBoost::currentLearnerType = -1;
int ClassifierBoost::learnerCount = 1000;
int ClassifierBoost::svmCount = 2;

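// The pool of weak-learner parameters is shared (static) across all boost
// classifiers: InitLearners() regenerates it whenever the weak learner type
// changes (or, for SVM learners, when svmCount changes), and it is reused
// otherwise so that repeated trainings draw from the same candidate pool.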
void ClassifierBoost::InitLearners(fvec xMin, fvec xMax)
{
    srand(1); // so we always generate the same weak learners
    switch(weakType)
    {
    case 0: // stumps
        learnerCount = dim;
        break;
    case 1: // projections
        learnerCount = dim>2 ? 1000 : 360;
        break;
    case 2: // rectangles
    case 3: // circles
    case 4: // gmm
    case 5: // svm
        learnerCount = 3000;
        break;
    }
    learnerCount = max(learnerCount, (int)weakCount);

    learners.clear();
    learners.resize(learnerCount);
    // we generate a bunch of random learners
    switch(weakType)
    {
    case 0: // stumps
    {
        FOR(i, learnerCount)
        {
            learners[i].resize(1);
            learners[i][0] = i % dim; // we choose a single dimension
        }
    }
        break;
    case 1: // random projections
    {
        if(dim==2)
        {
            // in 2D we sweep the projection directions uniformly over the half-circle
            FOR(i, learnerCount)
            {
                learners[i].resize(dim);
                float theta = i / (float)learnerCount * PIf;
                learners[i][0] = cosf(theta);
                learners[i][1] = sinf(theta);
            }
        }
        else
        {
            // in higher dimensions we draw random non-negative coefficients,
            // normalized to sum to one
            FOR(i, learnerCount)
            {
                learners[i].resize(dim);
                fvec projection(dim,0);
                float norm = 0;
                FOR(d, dim)
                {
                    projection[d] = drand48();
                    norm += projection[d];
                }
                FOR(d, dim) learners[i][d] = projection[d] / norm;
            }
        }
    }
        break;
    case 2: // random rectangles
    {
        FOR(i, learnerCount)
        {
            learners[i].resize(dim*2);
            FOR(d, dim)
            {
                float x = drand48()*(xMax[d] - xMin[d]) + xMin[d]; // rectangle lower edge in this dimension
                float l = drand48()*(xMax[d] - xMin[d]); // width
                learners[i][2*d] = x;
                learners[i][2*d+1] = l;
            }
        }
    }
        break;
    case 3: // random circles
    {
        if(dim==2)
        {
            FOR(i, learnerCount)
            {
                learners[i].resize(dim);
                learners[i][0] = drand48()*(xMax[0]-xMin[0]) + xMin[0];
                learners[i][1] = drand48()*(xMax[1]-xMin[1]) + xMin[1];
            }
        }
        else
        {
            FOR(i, learnerCount)
            {
                learners[i].resize(dim);
                FOR(d, dim) learners[i][d] = drand48()*(xMax[d]-xMin[d]) + xMin[d];
            }
        }
    }
        break;
    case 4: // random GMM
    {
        FOR(i, learnerCount)
        {
            learners[i].resize(dim + dim*(dim+1)/2); // a center plus a (triangular) covariance matrix
            // we generate a random center
            FOR(d, dim)
            {
                learners[i][d] = drand48()*(xMax[d] - xMin[d]) + xMin[d];
            }
            // we generate a random covariance matrix
            float minLambda = (xMax[0]-xMin[0])*0.01f; // we set the minimum covariance lambda to 1% of the data span
            fvec C = RandCovMatrix(dim, minLambda);
            // pack the lower triangle row by row: index(d1,d2) = d1*(d1+1)/2 + d2,
            // e.g. for dim=2 the learner layout is [mu0, mu1, C00, C10, C11]
            FOR(d1, dim)
            {
                FOR(d2, d1+1)
                {
                    int index = d1*(d1+1)/2 + d2; // index in triangular matrix form
                    learners[i][dim + index] = C[d1*dim + d2];
                }
            }
        }
    }
        break;
    case 5: // random SVM
    {
        FOR(i, learnerCount)
        {
            learners[i].resize(1 + svmCount*(dim+1)); // a kernel width plus svmCount alphas and support vectors
            learners[i][0] = 1.f / (drand48()*(xMax[0]-xMin[0])); // inverse kernel width (gamma), scaled to the data span
            float sumAlpha=0;
            FOR(j, svmCount)
            {
                // we generate a random alpha
                if(j<svmCount-1) sumAlpha += (learners[i][1+(dim+1)*j] = drand48()*2.f - 1.f);
                else learners[i][1+(dim+1)*j] = -sumAlpha; // we ensure that the sum of all alphas is zero
                // and the coordinates of the support vector
                FOR(d, dim)
                {
                    learners[i][1+(dim+1)*j+1 + d] = drand48()*(xMax[d]-xMin[d])+xMin[d];
                }
            }
        }
    }
        break;
    }
    currentLearnerType = weakType;
}

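// Training builds an explicit (sample x learner) response matrix: every sample
// is passed through every candidate weak learner, and CvBoost then boosts
// depth-one trees (stumps) over these responses, effectively selecting the
// most discriminative weak learners as features.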
void ClassifierBoost::Train( std::vector< fvec > samples, ivec labels )
{
    if(model) model->clear();
    u32 sampleCnt = samples.size();
    if(!sampleCnt) return;
    DEL(model);
    dim = samples[0].size();
    u32 *perm = randPerm(sampleCnt);
    this->samples = samples;
    this->labels = labels;

    // we need to find the boundaries
    fvec xMin(dim, FLT_MAX), xMax(dim, -FLT_MAX);
    FOR(i, samples.size())
    {
        FOR(d, dim)
        {
            if(xMin[d] > samples[i][d]) xMin[d] = samples[i][d];
            if(xMax[d] < samples[i][d]) xMax[d] = samples[i][d];
        }
    }

    // we need to regenerate the learners
    if(currentLearnerType != weakType || (int)learners.size() != learnerCount) InitLearners(xMin, xMax);

    CvMat *trainSamples = cvCreateMat(sampleCnt, learnerCount, CV_32FC1);
    CvMat *trainLabels = cvCreateMat(labels.size(), 1, CV_32FC1);
    CvMat *sampleWeights = cvCreateMat(samples.size(), 1, CV_32FC1);

    switch(weakType)
    {
    case 0: // stumps
    {
        FOR(i, sampleCnt)
        {
            fvec sample = samples[perm[i]];
            FOR(j, learnerCount)
            {
                int index = learners[j][0];
                float val = index < dim ? sample[index] : 0;
                cvSetReal2D(trainSamples, i, j, val);
            }
            cvSet1D(trainLabels, i, cvScalar((float)labels[perm[i]]));
            cvSet1D(sampleWeights, i, cvScalar(1));
        }
    }
        break;
    case 1: // random projections
    {
        if(dim == 2)
        {
            FOR(i, sampleCnt)
            {
                fvec sample = samples[perm[i]];
                FOR(j, learnerCount)
                {
                    float val = sample[0]*learners[j][0] + sample[1]*learners[j][1];
                    cvSetReal2D(trainSamples, i, j, val);
                }
                cvSet1D(trainLabels, i, cvScalar((float)labels[perm[i]]));
                cvSet1D(sampleWeights, i, cvScalar(1));
            }
        }
        else
        {
            FOR(i, sampleCnt)
            {
                // project the sample in the direction of the learner
                fvec sample = samples[perm[i]];
                FOR(j, learnerCount)
                {
                    float val = 0;
                    FOR(d, dim) val += sample[d] * learners[j][d];
                    cvSetReal2D(trainSamples, i, j, val);
                }
                cvSet1D(trainLabels, i, cvScalar((float)labels[perm[i]]));
                cvSet1D(sampleWeights, i, cvScalar(1));
            }
        }
    }
        break;
    case 2: // random rectangles
    {
        FOR(i, sampleCnt)
        {
            // check if the sample is inside the rectangle generated by the learner
            const fvec sample = samples[perm[i]];
            FOR(j, learnerCount)
            {
                float val = 1;
                FOR(d, dim)
                {
                    if(sample[d] < learners[j][2*d] || sample[d] > learners[j][2*d]+learners[j][2*d+1])
                    {
                        val = 0;
                        break;
                    }
                }
                cvSetReal2D(trainSamples, i, j, val + drand48()*0.01); // we add a small noise to the value just to not have only 0s and 1s
            }
            cvSet1D(trainLabels, i, cvScalar((float)labels[perm[i]]));
            cvSet1D(sampleWeights, i, cvScalar(1));
        }
    }
        break;
    case 3: // random circles
    {
        if(dim == 2)
        {
            FOR(i, sampleCnt)
            {
                fvec sample = samples[perm[i]];
                FOR(j, learnerCount)
                {
                    float val = sqrtf((sample[0] - learners[j][0])*(sample[0] - learners[j][0])+
                                      (sample[1] - learners[j][1])*(sample[1] - learners[j][1]));
                    cvSetReal2D(trainSamples, i, j, val);
                }
                cvSet1D(trainLabels, i, cvScalar((float)labels[perm[i]]));
                cvSet1D(sampleWeights, i, cvScalar(1));
            }
        }
        else
        {
            FOR(i, sampleCnt)
            {
                // distance between the sample and the center of the learner
                fvec sample = samples[perm[i]];
                FOR(j, learnerCount)
                {
                    float val = 0;
                    FOR(d, dim) val += (sample[d] - learners[j][d])*(sample[d] - learners[j][d]);
                    val = sqrtf(val);
                    cvSetReal2D(trainSamples, i, j, val);
                }
                cvSet1D(trainLabels, i, cvScalar((float)labels[perm[i]]));
                cvSet1D(sampleWeights, i, cvScalar(1));
            }
        }
    }
        break;
    case 4: // random GMM
    {
        FOR(i, sampleCnt)
        {
            fvec sample = samples[perm[i]];
            FOR(j, learnerCount)
            {
                fvec &gmm = learners[j];
                float val = 0;
                fvec x(dim);
                FOR(d, dim) x[d] = sample[d]-gmm[d];
                // val = x^T C x, reading C from its packed triangular form
                FOR(d, dim)
                {
                    float xC = 0;
                    FOR(d1, dim)
                    {
                        int index = d1>d ? d1*(d1+1)/2 + d : d*(d+1)/2 + d1;
                        xC += x[d1]*gmm[dim+index];
                    }
                    val += xC*x[d];
                }
                cvSetReal2D(trainSamples, i, j, val);
            }
            cvSet1D(trainLabels, i, cvScalar((float)labels[perm[i]]));
            cvSet1D(sampleWeights, i, cvScalar(1));
        }
    }
        break;
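    // Each random SVM learner responds with f(x) = sum_k alpha_k * exp(-gamma*||x - s_k||^2),
    // where the alphas sum to zero, so the response is a signed, kernel-weighted
    // comparison between the sample and the learner's random support vectors.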
    case 5: // random SVM
    {
        FOR(i, sampleCnt)
        {
            // compute the svm function
            fvec sample = samples[perm[i]];
            FOR(j, learnerCount)
            {
                fvec &svm = learners[j];
                float val = 0;
                float gamma = svm[0];
                FOR(k, svmCount)
                {
                    float alpha = svm[1+k*(dim+1)];
                    // we compute the RBF kernel
                    float K = 0;
                    int index = 1+k*(dim+1)+1;
                    FOR(d, dim)
                    {
                        float dist = sample[d]-svm[index+d];
                        K += dist*dist;
                    }
                    K *= gamma;
                    val += alpha*expf(-K);
                }
                cvSetReal2D(trainSamples, i, j, val);
            }
            cvSet1D(trainLabels, i, cvScalar((float)labels[perm[i]]));
            cvSet1D(sampleWeights, i, cvScalar(1));
        }
    }
        break;
    }

    CvMat *varType = cvCreateMat(trainSamples->width+1, 1, CV_8UC1);
    FOR(i, trainSamples->width)
    {
        CV_MAT_ELEM(*varType, u8, i, 0) = CV_VAR_NUMERICAL;
    }
    CV_MAT_ELEM(*varType, u8, trainSamples->width, 0) = CV_VAR_CATEGORICAL;

    int maxSplit = 1;
    CvBoostParams params(boostType, weakCount, 0.95, maxSplit, false, NULL);
    params.split_criteria = CvBoost::DEFAULT;
    model = new CvBoost();
    model->train(trainSamples, CV_ROW_SAMPLE, trainLabels, NULL, NULL, varType, NULL, params);

    // collect the feature (learner) indices actually selected by the boosting
    CvSeq *predictors = model->get_weak_predictors();
    int length = cvSliceLength(CV_WHOLE_SEQ, predictors);
    features.clear();
    FOR(i, length)
    {
        CvBoostTree *predictor = *CV_SEQ_ELEM(predictors, CvBoostTree*, i);
        CvDTreeSplit *split = predictor->get_root()->split;
        if(!split) continue;
        features.push_back(split->var_idx);
    }

    // rescale the output scores into a fixed range (roughly [-5, 5])
    scoreMultiplier = 1.f;
    float maxScore=-FLT_MAX, minScore=FLT_MAX;
    FOR(i, samples.size())
    {
        float score = Test(samples[i]);
        if(score > maxScore) maxScore = score;
        if(score < minScore) minScore = score;
    }
    if(minScore != maxScore)
    {
        scoreMultiplier = 5.f / max(fabsf(maxScore), fabsf(minScore));
    }
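
    // AdaBoost-style reweighting: given a learner's weighted error err, its
    // misclassified samples are scaled by c = sqrt((1-err)/err) and correctly
    // classified ones by 1/c, then all weights are renormalized; consistently
    // hard samples therefore accumulate large error weights.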
    // we want to compute the error weight for each training sample
    vector<fvec> responses(length);
    FOR(i, length) responses[i].resize(sampleCnt);
    // first we compute all the responses, for each learner
    FOR(i, sampleCnt)
    {
        fvec response(length);
        Test(samples[i], &response);
        FOR(j, length) responses[j][i] = response[j];
    }
    // then we iterate through the learners
    errorWeights = fvec(sampleCnt, 1.f);
    FOR(i, responses.size())
    {
        double sum = 0;
        // we compute the current weighted error
        FOR(j, sampleCnt)
        {
            double response = responses[i][j];
            if((response < 0 && labels[j] == 1) || (response >= 0 && labels[j] != 1)) sum += fabs(response)*errorWeights[j]/sampleCnt;
        }
        double c = sqrt(fabs((1-sum)/max(sum, 1e-10))); // guard against a zero error

        // we update the individual weights
        sum = 0;
        FOR(j, sampleCnt)
        {
            double response = responses[i][j];
            if((response < 0 && labels[j] == 1) || (response >= 0 && labels[j] != 1)) errorWeights[j] *= c;
            else errorWeights[j] *= 1.f/c;
            sum += errorWeights[j];
        }
        sum /= sampleCnt;
        // and we renormalize them
        FOR(j, sampleCnt) errorWeights[j] /= sum;
    }

    cvReleaseMat(&trainSamples);
    cvReleaseMat(&trainLabels);
    cvReleaseMat(&sampleWeights);
    cvReleaseMat(&varType);
    delete [] perm;
    trainSamples = 0;
    trainLabels = 0;
    sampleWeights = 0;
    varType = 0;
}

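// Test maps the sample through the weak learners that were selected during
// training (only the chosen feature columns need to be filled in), then lets
// CvBoost::predict accumulate the weak responses into a single score.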
float ClassifierBoost::Test( const fvec &sample )
{
    return Test(sample, 0);
}

float ClassifierBoost::Test( const fvec &sample, fvec *responses )
{
    if(!model) return 0;
    if(!learners.size()) return 0;

    CvMat *x = cvCreateMat(1, learners.size(), CV_32FC1);
    switch(weakType)
    {
    case 0: // stumps
    {
        FOR(i, features.size())
        {
            int index = learners[features[i]][0];
            float val = index < dim ? sample[index] : 0;
            cvSetReal2D(x, 0, features[i], val);
        }
    }
        break;
    case 1: // random projections
    {
        if(dim == 2)
        {
            FOR(i, features.size())
            {
                float val = sample[0]*learners[features[i]][0] + sample[1]*learners[features[i]][1];
                cvSetReal2D(x, 0, features[i], val);
            }
        }
        else
        {
            FOR(i, features.size())
            {
                float val = sample * learners[features[i]]; // dot product
                cvSetReal2D(x, 0, features[i], val);
            }
        }
    }
        break;
    case 2: // random rectangles
    {
        FOR(i, features.size())
        {
            int val = 1;
            FOR(d, dim)
            {
                if(sample[d] < learners[features[i]][2*d] ||
                   sample[d] > learners[features[i]][2*d]+learners[features[i]][2*d+1])
                {
                    val = 0;
                    break;
                }
            }
            cvSetReal2D(x, 0, features[i], val + drand48()*0.01); // same small jitter as in training
        }
    }
        break;
    case 3: // random circles
    {
        if(dim == 2)
        {
            FOR(i, features.size())
            {
                float val = sqrtf((sample[0] - learners[features[i]][0])*(sample[0] - learners[features[i]][0])+
                                  (sample[1] - learners[features[i]][1])*(sample[1] - learners[features[i]][1]));
                cvSetReal2D(x, 0, features[i], val);
            }
        }
        else
        {
            FOR(i, features.size())
            {
                float val = 0;
                FOR(d, dim) val += (sample[d] - learners[features[i]][d])*(sample[d] - learners[features[i]][d]);
                val = sqrtf(val);
                cvSetReal2D(x, 0, features[i], val);
            }
        }
    }
        break;
    case 4: // random GMM
    {
        FOR(i, features.size())
        {
            float val = 0;
            fvec &gmm = learners[features[i]];
            fvec xt(dim);
            FOR(d, dim) xt[d] = sample[d]-gmm[d];
            FOR(d, dim)
            {
                float xC = 0;
                FOR(d1, dim)
                {
                    int index = d1>d ? d1*(d1+1)/2 + d : d*(d+1)/2 + d1;
                    xC += xt[d1]*gmm[dim+index];
                }
                val += xC*xt[d];
            }
            cvSetReal2D(x, 0, features[i], val);
        }
    }
        break;
    case 5: // random SVM
    {
        FOR(i, features.size())
        {
            fvec &svm = learners[features[i]];
            float val = 0;
            float gamma = svm[0];
            FOR(k, svmCount)
            {
                float alpha = svm[1+k*(dim+1)];
                // we compute the RBF kernel
                float K = 0;
                int index = 1+k*(dim+1)+1;
                FOR(d, dim)
                {
                    float dist = sample[d]-svm[index+d];
                    K += dist*dist;
                }
                K *= gamma;
                val += alpha*expf(-K);
            }
            cvSetReal2D(x, 0, features[i], val);
        }
    }
        break;
    }

    // allocate memory for the weak learner outputs
    int length = cvSliceLength(CV_WHOLE_SEQ, model->get_weak_predictors());
    CvMat *weakResponses = cvCreateMat(length, 1, CV_32FC1);
    float y = model->predict(x, NULL, weakResponses, CV_WHOLE_SEQ); // y is the predicted class label; we use the summed weak responses as a graded score instead

    if(responses != NULL)
    {
        responses->resize(length);
        FOR(i, length) (*responses)[i] = cvGet1D(weakResponses, i).val[0];
    }
    double score = cvSum(weakResponses).val[0] * scoreMultiplier;

    cvReleaseMat(&weakResponses);
    cvReleaseMat(&x);
    return score;
}

void ClassifierBoost::SetParams( u32 weakCount, int weakType, int boostType, int svmCount )
{
    this->weakCount = weakCount;
    this->weakType = weakType;
    this->boostType = boostType;
    if(this->svmCount != svmCount)
    {
        // if we changed the number of support vectors we need to regenerate the learners
        this->svmCount = svmCount;
        if(weakType == 5) currentLearnerType = -1;
    }
}

const char *ClassifierBoost::GetInfoString()
{
    // build the string by appending at p; sprintf(text, "%s...", text) as in
    // the old version is undefined behavior (overlapping source/destination)
    char *text = new char[1024];
    char *p = text;
    p += sprintf(p, "Boosting\n");
    p += sprintf(p, "Learners Count: %u\n", weakCount);
    p += sprintf(p, "Learners Type: ");
    switch(weakType)
    {
    case 0:
        sprintf(p, "Decision Stumps\n");
        break;
    case 1:
        sprintf(p, "Random Projections\n");
        break;
    case 2:
        sprintf(p, "Random Rectangles\n");
        break;
    case 3:
        sprintf(p, "Random Circles\n");
        break;
    case 4:
        sprintf(p, "Random GMM\n");
        break;
    case 5:
        sprintf(p, "Random SVM %d\n", svmCount);
        break;
    }
    return text;
}