_AlgorithmsPlugins/OpenCV/classifierBoost.cpp
/*********************************************************************
MLDemos: A User-Friendly visualization toolkit for machine learning
Copyright (C) 2010  Basilio Noris
Contact: mldemos@b4silio.com

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Library General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*********************************************************************/
#include "public.h"
#include "basicMath.h"
#include "classifierBoost.h"
#include <QDebug>

using namespace std;
ClassifierBoost::ClassifierBoost()
    : model(0), weakCount(0), scoreMultiplier(1.f), boostType(CvBoost::GENTLE)
{
    bSingleClass = false;
}

ClassifierBoost::~ClassifierBoost()
{
    if(model) model->clear();
    DEL(model);
}

vector<fvec> ClassifierBoost::learners;
int ClassifierBoost::currentLearnerType = -1;
int ClassifierBoost::learnerCount = 1000;
int ClassifierBoost::svmCount = 2;

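// Weak-learner parameter layout, per weakType (as generated below):
//   0 stumps:      [dimension index]
//   1 projections: [projection direction, one weight per dimension]
//   2 rectangles:  [x, l] per dimension; the inside test below uses the interval [x, x+l],
//                  so x acts as the low corner even though the generation comment calls it the center
//   3 circles:     [center (dim)]; the learner response is the distance to the center
//   4 gmm:         [center (dim)] + [packed triangular matrix (dim*(dim+1)/2)]
//   5 svm:         [gamma] + svmCount * [alpha, support vector (dim)]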
void ClassifierBoost::InitLearners(fvec xMin, fvec xMax)
{
    srand(1); // so we always generate the same weak learners
    switch(weakType)
    {
    case 0: // stumps
        learnerCount = dim;
        break;
    case 1: // projections
        learnerCount = dim > 2 ? 1000 : 360;
        break;
    case 2: // rectangles
    case 3: // circles
    case 4: // gmm
    case 5: // svm
        learnerCount = 3000;
        break;
    }
    learnerCount = max(learnerCount, (int)weakCount); // never fewer learners than boosting rounds

    learners.clear();
    learners.resize(learnerCount);
    // we generate a bunch of random directions as learners
    //srand(1);
    switch(weakType)
    {
    case 0: // stumps
    {
        FOR(i, learnerCount)
        {
            learners[i].resize(1);
            learners[i][0] = i % dim; // we choose a single dimension
        }
    }
        break;
    case 1: // random projections
    {
        if(dim == 2)
        {
            FOR(i, learnerCount)
            {
                learners[i].resize(dim);
                float theta = i / (float)learnerCount * PIf;
                learners[i][0] = cosf(theta);
                learners[i][1] = sinf(theta);
            }
        }
        else
        {
            FOR(i, learnerCount)
            {
                learners[i].resize(dim);
                fvec projection(dim, 0);
                float norm = 0;
                FOR(d, dim)
                {
                    projection[d] = drand48();
                    norm += projection[d];
                }
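                // note: the entries are non-negative and divided by their sum (L1 norm),
                // so these "directions" lie on the positive simplex rather than the unit sphere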
                FOR(d, dim) learners[i][d] = projection[d] / norm;
            }
        }
    }
        break;
    case 2: // random rectangles
    {
        FOR(i, learnerCount)
        {
            learners[i].resize(dim*2);
            FOR(d, dim)
            {
                float x = drand48()*(xMax[d] - xMin[d]) + xMin[d]; // rectangle center
                //float x = (drand48()*2-0.5)*(xMax[d] - xMin[d]) + xMin[d]; // alternative: allow centers outside the data range
                float l = drand48()*(xMax[d] - xMin[d]); // width
                learners[i][2*d] = x;
                learners[i][2*d+1] = l;
            }
        }
    }
        break;
    case 3: // random circles
    {
        if(dim == 2)
        {
            FOR(i, learnerCount)
            {
                learners[i].resize(dim);
                learners[i][0] = drand48()*(xMax[0]-xMin[0]) + xMin[0];
                learners[i][1] = drand48()*(xMax[1]-xMin[1]) + xMin[1];
            }
        }
        else
        {
            FOR(i, learnerCount)
            {
                learners[i].resize(dim);
                FOR(d, dim) learners[i][d] = drand48()*(xMax[d]-xMin[d]) + xMin[d];
            }
        }
    }
        break;
    case 4: // random GMM
    {
        FOR(i, learnerCount)
        {
            learners[i].resize(dim + dim*(dim+1)/2); // a center plus a covariance matrix
            // we generate a random center
            FOR(d, dim)
            {
                learners[i][d] = drand48()*(xMax[d] - xMin[d]) + xMin[d];
            }
            // we generate a random covariance matrix
            float minLambda = (xMax[0]-xMin[0])*0.01f; // we set the minimum covariance lambda to 1% of the data span
            fvec C = RandCovMatrix(dim, minLambda);
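            // the matrix is stored in packed (lower-)triangular form:
            // entry (d1,d2) with d1 >= d2 lives at index d1*(d1+1)/2 + d2,
            // e.g. for dim=2 the layout is [C00, C10, C11]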
            FOR(d1, dim)
            {
                FOR(d2, d1+1)
                {
                    int index = d1*(d1+1)/2 + d2; // index in triangular matrix form
                    learners[i][dim + index] = C[d1*dim + d2];
                }
            }
        }
    }
        break;
    case 5: // random SVM
    {
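        // each random "SVM" evaluates f(x) = sum_k alpha_k * exp(-gamma * ||x - sv_k||^2),
        // with the alphas constrained to sum to zero;
        // layout: [gamma, (alpha_0, sv_0), (alpha_1, sv_1), ...]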
        FOR(i, learnerCount)
        {
            learners[i].resize(1 + svmCount*(dim+1)); // a kernel width plus svmCount points plus svmCount alphas
            learners[i][0] = 1.f / drand48()*(xMax[0]-xMin[0]); // kernel width proportional to the data (note: parsed as (1/r)*span; possibly meant 1/(r*span))
            float sumAlpha = 0;
            FOR(j, svmCount)
            {
                // we generate a random alpha
                if(j < svmCount-1) sumAlpha += (learners[i][1+(dim+1)*j] = drand48()*2.f - 1.f);
                else learners[i][1+(dim+1)*j] = -sumAlpha; // we ensure that the sum of all alphas is zero
                // and the coordinates of the SV
                FOR(d, dim)
                {
                    learners[i][1+(dim+1)*j+1 + d] = drand48()*(xMax[d]-xMin[d]) + xMin[d];
                }
            }
        }
    }
        break;
    }
    currentLearnerType = weakType;
}

void ClassifierBoost::Train(std::vector<fvec> samples, ivec labels)
{
    if(model) model->clear();
    u32 sampleCnt = samples.size();
    if(!sampleCnt) return;
    if(sampleCnt < 16)
    {
        // duplicate the data until we have at least 16 samples
        vector<fvec> copy = samples;
        ivec lcopy = labels;
        while(sampleCnt < 16)
        {
            samples.insert(samples.end(), copy.begin(), copy.end());
            labels.insert(labels.end(), lcopy.begin(), lcopy.end());
            sampleCnt = samples.size();
        }
    }
    DEL(model);
    dim = samples[0].size();
    u32 *perm = randPerm(sampleCnt);
    this->samples = samples;
    this->labels = labels;

    // we need to find the boundaries
    fvec xMin(dim, FLT_MAX), xMax(dim, -FLT_MAX);
    FOR(i, samples.size())
    {
        FOR(d, dim)
        {
            if(xMin[d] > samples[i][d]) xMin[d] = samples[i][d];
            if(xMax[d] < samples[i][d]) xMax[d] = samples[i][d];
        }
    }

    // we need to regenerate the learners
    if(currentLearnerType != weakType || (int)learners.size() != learnerCount) InitLearners(xMin, xMax);

    qDebug() << "generating learners";
    CvMat *trainSamples = cvCreateMat(sampleCnt, learnerCount, CV_32FC1);
    CvMat *trainLabels = cvCreateMat(labels.size(), 1, CV_32FC1);
    CvMat *sampleWeights = cvCreateMat(samples.size(), 1, CV_32FC1);

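    // each column j of trainSamples holds the scalar response of weak learner j on every
    // (permuted) training sample; since the boosted trees are stumps (maxSplit = 1 below),
    // each boosting round effectively picks one weak learner and a threshold on its output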
    switch(weakType)
    {
    case 0: // stumps
    {
        FOR(i, sampleCnt)
        {
            fvec sample = samples[perm[i]];
            FOR(j, learnerCount)
            {
                int index = learners[j][0];
                float val = index < dim ? sample[index] : 0;
                cvSetReal2D(trainSamples, i, j, val);
            }
            cvSet1D(trainLabels, i, cvScalar((float)labels[perm[i]]));
            cvSet1D(sampleWeights, i, cvScalar(1));
        }
    }
        break;
    case 1: // random projections
    {
        if(dim == 2)
        {
            FOR(i, sampleCnt)
            {
                fvec sample = samples[perm[i]];
                FOR(j, learnerCount)
                {
                    float val = sample[0]*learners[j][0] + sample[1]*learners[j][1];
                    cvSetReal2D(trainSamples, i, j, val);
                }
                cvSet1D(trainLabels, i, cvScalar((float)labels[perm[i]]));
                cvSet1D(sampleWeights, i, cvScalar(1));
            }
        }
        else
        {
            FOR(i, sampleCnt)
            {
                // project the sample in the direction of the learner
                fvec sample = samples[perm[i]];
                FOR(j, learnerCount)
                {
                    float val = 0;
                    FOR(d, dim) val += sample[d] * learners[j][d];
                    cvSetReal2D(trainSamples, i, j, val);
                }
                cvSet1D(trainLabels, i, cvScalar((float)labels[perm[i]]));
                cvSet1D(sampleWeights, i, cvScalar(1));
            }
        }
    }
        break;
    case 2: // random rectangles
    {
        FOR(i, sampleCnt)
        {
            // check if the sample is inside the rectangle generated by the classifier
            const fvec sample = samples[perm[i]];
            FOR(j, learnerCount)
            {
                float val = 1;
                FOR(d, dim)
                {
                    if(sample[d] < learners[j][2*d] || sample[d] > learners[j][2*d]+learners[j][2*d+1])
                    {
                        val = 0;
                        break;
                    }
                }
                cvSetReal2D(trainSamples, i, j, val + drand48()*0.01); // we add a small noise to the value just to not have only 0s and 1s
            }
            cvSet1D(trainLabels, i, cvScalar((float)labels[perm[i]]));
            cvSet1D(sampleWeights, i, cvScalar(1));
        }
    }
        break;
    case 3: // random circles
    {
        if(dim == 2)
        {
            FOR(i, sampleCnt)
            {
                fvec sample = samples[perm[i]];
                FOR(j, learnerCount)
                {
                    float val = sqrtf((sample[0] - learners[j][0])*(sample[0] - learners[j][0]) +
                                      (sample[1] - learners[j][1])*(sample[1] - learners[j][1]));
                    cvSetReal2D(trainSamples, i, j, val);
                }
                cvSet1D(trainLabels, i, cvScalar((float)labels[perm[i]]));
                cvSet1D(sampleWeights, i, cvScalar(1));
            }
        }
        else
        {
            FOR(i, sampleCnt)
            {
                // distance between the sample and the center of the circle
                fvec sample = samples[perm[i]];
                FOR(j, learnerCount)
                {
                    float val = 0;
                    FOR(d, dim) val += (sample[d] - learners[j][d])*(sample[d] - learners[j][d]);
                    val = sqrtf(val);
                    cvSetReal2D(trainSamples, i, j, val);
                }
                cvSet1D(trainLabels, i, cvScalar((float)labels[perm[i]]));
                cvSet1D(sampleWeights, i, cvScalar(1));
            }
        }
    }
        break;
    case 4: // random GMM
    {
        FOR(i, sampleCnt)
        {
            fvec sample = samples[perm[i]];
            FOR(j, learnerCount)
            {
                // compute the quadratic form x^T C x between the sample and the gmm center
                fvec &gmm = learners[j];
                float val = 0;
                fvec x(dim);
                FOR(d, dim) x[d] = sample[d] - gmm[d];
                FOR(d, dim)
                {
                    float xC = 0;
                    FOR(d1, dim)
                    {
                        int index = d1 > d ? d1*(d1+1)/2 + d : d*(d+1)/2 + d1; // symmetric access into the packed triangular matrix
                        xC += x[d1]*gmm[dim+index];
                    }
                    val += xC*x[d];
                }
                cvSetReal2D(trainSamples, i, j, val);
            }
            cvSet1D(trainLabels, i, cvScalar((float)labels[perm[i]]));
            cvSet1D(sampleWeights, i, cvScalar(1));
        }
    }
        break;
    case 5: // random SVM
    {
        FOR(i, sampleCnt)
        {
            // compute the svm function
            fvec sample = samples[perm[i]];
            FOR(j, learnerCount)
            {
                fvec &svm = learners[j];
                float val = 0;
                float gamma = svm[0];
                FOR(k, svmCount)
                {
                    float alpha = svm[1+k*(dim+1)];
                    // we compute the rbf kernel
                    float K = 0;
                    int index = 1+k*(dim+1)+1;
                    FOR(d, dim)
                    {
                        float dist = sample[d]-svm[index+d];
                        K += dist*dist;
                    }
                    K *= gamma;
                    val += alpha*expf(-K);
                }
                cvSetReal2D(trainSamples, i, j, val);
            }
            cvSet1D(trainLabels, i, cvScalar((float)labels[perm[i]]));
            cvSet1D(sampleWeights, i, cvScalar(1));
        }
    }
        break;
    }
    qDebug() << "creating data";

    CvMat *varType = cvCreateMat(trainSamples->width+1, 1, CV_8UC1);
    FOR(i, trainSamples->width)
    {
        CV_MAT_ELEM(*varType, u8, i, 0) = CV_VAR_NUMERICAL;
    }
    CV_MAT_ELEM(*varType, u8, trainSamples->width, 0) = CV_VAR_CATEGORICAL;

    int maxSplit = 1;
    CvBoostParams params(boostType, weakCount, 0.95, maxSplit, false, NULL);
    params.split_criteria = CvBoost::DEFAULT;
    model = new CvBoost();
    qDebug() << "training with" << samples.size() << "samples";
    qDebug() << "trainSamples" << trainSamples->rows << trainSamples->cols;
    qDebug() << "trainLabels" << trainLabels->rows << trainLabels->cols;
    model->train(trainSamples, CV_ROW_SAMPLE, trainLabels, NULL, NULL, varType, NULL, params);
    qDebug() << "done";

    CvSeq *predictors = model->get_weak_predictors();
    int length = cvSliceLength(CV_WHOLE_SEQ, predictors);
    //qDebug() << "length:" << length;
    features.clear();
    FOR(i, length)
    {
        CvBoostTree *predictor = *CV_SEQ_ELEM(predictors, CvBoostTree*, i);
        CvDTreeSplit *split = predictor->get_root()->split;
        if(!split) continue;
        features.push_back(split->var_idx); // remember which weak learner each boosted stump uses
    }

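    // rescale the raw boosting score so that the largest magnitude over the
    // training set maps to roughly 5 (presumably for visualization purposes)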
    scoreMultiplier = 1.f;
    float maxScore = -FLT_MAX, minScore = FLT_MAX;
    FOR(i, samples.size())
    {
        float score = Test(samples[i]);
        if(score > maxScore) maxScore = score;
        if(score < minScore) minScore = score;
    }
    if(minScore != maxScore)
    {
        scoreMultiplier = 5.f / max(fabs((double)maxScore), fabs((double)minScore));
    }

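    // AdaBoost-style reweighting of the training samples: for each weak learner in turn,
    // compute its weighted error err, set c = sqrt((1-err)/err), multiply the weights of
    // misclassified samples by c and of correctly classified ones by 1/c, then renormalize
    // to mean 1; samples that are repeatedly misclassified end up with large errorWeights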
    // we want to compute the error weight for each training sample
    vector<fvec> responses(length);
    FOR(i, length) responses[i].resize(sampleCnt);
    // first we compute all the responses, for each learner
    FOR(i, sampleCnt)
    {
        fvec response(length);
        Test(samples[i], &response);
        FOR(j, length) responses[j][i] = response[j];
    }
    // then we iterate through the learners
    errorWeights = fvec(sampleCnt, 1.f);
    FOR(i, responses.size())
    {
        double sum = 0;
        // we compute the current weighted error
        FOR(j, sampleCnt)
        {
            double response = responses[i][j];
            //debugString += QString("%1(%2) ").arg(response,0,'f',2).arg(labels[perm[j]]);
            if((response < 0 && labels[j] == 1) || (response >= 0 && labels[j] != 1)) sum += fabs(response)*errorWeights[j]/sampleCnt;
        }
        //qDebug() << debugString;
        if(sum == 0) continue; // this learner makes no weighted errors: leave the weights untouched
        double c = sqrt(fabs((1-sum)/sum));

        // we update the individual weights
        sum = 0;
        FOR(j, sampleCnt)
        {
            double response = responses[i][j];
            if((response < 0 && labels[j] == 1) || (response >= 0 && labels[j] != 1)) errorWeights[j] *= c;
            else errorWeights[j] *= 1.f/c;
            sum += errorWeights[j];
        }
        sum /= sampleCnt;
        // and we renormalize them
        FOR(j, sampleCnt) errorWeights[j] /= sum;
    }

    //QString debugString;
    //FOR(i, sampleCnt) debugString += QString("%1 ").arg(errorWeights[i],0,'f',3);
    //qDebug() << "errorWeights" << debugString;

    cvReleaseMat(&trainSamples);
    cvReleaseMat(&trainLabels);
    cvReleaseMat(&sampleWeights);
    cvReleaseMat(&varType);
    delete [] perm;
    trainSamples = 0;
    trainLabels = 0;
    sampleWeights = 0;
    varType = 0;
}


float ClassifierBoost::Test(const fvec &sample) const
{
    return Test(sample, 0);
}

float ClassifierBoost::Test(const fvec &sample, fvec *responses) const
{
    if(!model) return 0;
    if(!learners.size()) return 0;

    CvMat *x = cvCreateMat(1, learners.size(), CV_32FC1);
    switch(weakType)
    {
    case 0: // stumps
    {
        FOR(i, features.size())
        {
            int index = learners[features[i]][0];
            float val = index < dim ? sample[index] : 0;
            cvSetReal2D(x, 0, features[i], val);
        }
    }
        break;
    case 1: // random projections
    {
        if(dim == 2)
        {
            FOR(i, features.size())
            {
                float val = sample[0]*learners[features[i]][0] + sample[1]*learners[features[i]][1];
                cvSetReal2D(x, 0, features[i], val);
            }
        }
        else
        {
            FOR(i, features.size())
            {
                float val = sample * learners[features[i]]; // fvec dot product
                cvSetReal2D(x, 0, features[i], val);
            }
        }
    }
        break;
    case 2: // random rectangles
    {
        FOR(i, features.size())
        {
            int val = 1;
            FOR(d, dim)
            {
                if(sample[d] < learners[features[i]][2*d] ||
                   sample[d] > learners[features[i]][2*d]+learners[features[i]][2*d+1])
                {
                    val = 0;
                    break;
                }
            }
            cvSetReal2D(x, 0, features[i], val + drand48()*0.1); // small tie-breaking noise (note: 0.1 here versus 0.01 at training time)
        }
    }
        break;
    case 3: // random circles
    {
        if(dim == 2)
        {
            FOR(i, features.size())
            {
                float val = sqrtf((sample[0] - learners[features[i]][0])*(sample[0] - learners[features[i]][0]) +
                                  (sample[1] - learners[features[i]][1])*(sample[1] - learners[features[i]][1]));
                cvSetReal2D(x, 0, features[i], val);
            }
        }
        else
        {
            FOR(i, features.size())
            {
                float val = 0;
                FOR(d, dim) val += (sample[d] - learners[features[i]][d])*(sample[d] - learners[features[i]][d]);
                val = sqrtf(val);
                cvSetReal2D(x, 0, features[i], val);
            }
        }
    }
        break;
    case 4: // random GMM
    {
        FOR(i, features.size())
        {
            float val = 0;
            fvec &gmm = learners[features[i]];
            fvec xt(dim);
            FOR(d, dim) xt[d] = sample[d]-gmm[d];
            FOR(d, dim)
            {
                float xC = 0;
                FOR(d1, dim)
                {
                    int index = d1 > d ? d1*(d1+1)/2 + d : d*(d+1)/2 + d1;
                    xC += xt[d1]*gmm[dim+index];
                }
                val += xC*xt[d];
            }
            cvSetReal2D(x, 0, features[i], val);
        }
    }
        break;
    case 5: // random SVM
    {
        FOR(i, features.size())
        {
            fvec &svm = learners[features[i]];
            float val = 0;
            float gamma = svm[0];
            FOR(k, svmCount)
            {
                float alpha = svm[1+k*(dim+1)];
                // we compute the rbf kernel
                float K = 0;
                int index = 1+k*(dim+1)+1;
                FOR(d, dim)
                {
                    float dist = sample[d]-svm[index+d];
                    K += dist*dist;
                }
                K *= gamma;
                val += alpha*expf(-K);
            }
            cvSetReal2D(x, 0, features[i], val);
        }
    }
        break;
    }

    // allocate memory for the weak learner outputs
    int length = cvSliceLength(CV_WHOLE_SEQ, model->get_weak_predictors());
    CvMat *weakResponses = cvCreateMat(length, 1, CV_32FC1);
    float y = model->predict(x, NULL, weakResponses, CV_WHOLE_SEQ); // fills weakResponses; the predicted label y itself is unused

    if(responses != NULL)
    {
        (*responses).resize(length);
        FOR(i, length) (*responses)[i] = cvGet1D(weakResponses, i).val[0];
    }
    double score = cvSum(weakResponses).val[0] * scoreMultiplier;

    cvReleaseMat(&weakResponses);
    cvReleaseMat(&x);
    return score;
}

void ClassifierBoost::SetParams(u32 weakCount, int weakType, int boostType, int svmCount)
{
    this->weakCount = weakCount;
    this->weakType = weakType;
    this->boostType = boostType;
    if(this->svmCount != svmCount)
    {
        // if we changed the number of svms we need to regenerate the learners
        this->svmCount = svmCount;
        if(weakType == 5) currentLearnerType = -1;
    }
}

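// Minimal usage sketch (assuming fvec/ivec samples and labels already filled in, as
// elsewhere in MLDemos; the parameter values here are illustrative only):
//   ClassifierBoost *boost = new ClassifierBoost();
//   boost->SetParams(50, 1, CvBoost::GENTLE, 2); // 50 rounds, random projections
//   boost->Train(samples, labels);
//   float score = boost->Test(samples[0]);       // signed score, scaled by scoreMultiplier
//   delete boost;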
const char *ClassifierBoost::GetInfoString() const
{
    char *text = new char[1024];
    char *p = text;
    // write sequentially: sprintf(text, "%s...", text) overlaps source and destination, which is undefined behavior
    p += sprintf(p, "Boosting\n");
    p += sprintf(p, "Learners Count: %d\n", weakCount);
    p += sprintf(p, "Learners Type: ");
    switch(weakType)
    {
    case 0:
        sprintf(p, "Decision Stumps\n");
        break;
    case 1:
        sprintf(p, "Random Projections\n");
        break;
    case 2:
        sprintf(p, "Random Rectangles\n");
        break;
    case 3:
        sprintf(p, "Random Circles\n");
        break;
    case 4:
        sprintf(p, "Random GMM\n");
        break;
    case 5:
        sprintf(p, "Random SVM %d\n", svmCount);
        break;
    }
    return text;
}