ADDED: Random GMM and Random SVM weak learners for the Boost classifier
[mldemos:baraks-mldemos.git] / _AlgorithmsPlugins / OpenCV / classifierBoost.cpp
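The two new learner families reduce each sample to one scalar response that the boosted
stumps can threshold. A random GMM learner scores the quadratic form (x-mu)^T C (x-mu)
for a random center mu and a random symmetric matrix C; a random SVM learner scores a
zero-sum, alpha-weighted sum of RBF kernels centered on random support points. The sketch
below restates the two response functions in plain, self-contained C++ (std::vector stands
in for the MLDemos fvec typedef; the parameter layouts match the code that follows):

#include <cmath>
#include <vector>

// Sketch of the random-GMM weak-learner response: (x-mu)^T C (x-mu), with mu in the
// first dim entries and the lower triangle of C stored row by row after it.
float gmmResponse(const std::vector<float> &gmm, const std::vector<float> &x, int dim)
{
    float val = 0;
    for(int d = 0; d < dim; ++d)
    {
        float xC = 0;
        for(int d1 = 0; d1 < dim; ++d1)
        {
            int index = d1 > d ? d1*(d1+1)/2 + d : d*(d+1)/2 + d1; // symmetric lookup
            xC += (x[d1] - gmm[d1]) * gmm[dim + index];
        }
        val += xC * (x[d] - gmm[d]);
    }
    return val;
}

// Sketch of the random-SVM weak-learner response: sum_k alpha_k * exp(-gamma * ||x - sv_k||^2),
// with parameters stored as [gamma, alpha_0, sv_0(dim), alpha_1, sv_1(dim), ...].
float svmResponse(const std::vector<float> &svm, const std::vector<float> &x, int dim, int svmCount)
{
    float val = 0, gamma = svm[0];
    for(int k = 0; k < svmCount; ++k)
    {
        float alpha = svm[1 + k*(dim+1)];
        float K = 0; // squared distance to the k-th support point
        for(int d = 0; d < dim; ++d)
        {
            float dist = x[d] - svm[1 + k*(dim+1) + 1 + d];
            K += dist*dist;
        }
        val += alpha * std::exp(-gamma*K);
    }
    return val;
}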
/*********************************************************************
MLDemos: A User-Friendly visualization toolkit for machine learning
Copyright (C) 2010  Basilio Noris
Contact: mldemos@b4silio.com

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Library General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*********************************************************************/
#include "public.h"
#include "basicMath.h"
#include "classifierBoost.h"
#include <QDebug>

using namespace std;
ClassifierBoost::ClassifierBoost()
    : model(0), weakCount(0), scoreMultiplier(1.f), boostType(CvBoost::GENTLE)
{
    bSingleClass = false;
}

ClassifierBoost::~ClassifierBoost()
{
    if(model) model->clear();
    DEL(model);
}

vector<fvec> ClassifierBoost::learners;
int ClassifierBoost::currentLearnerType = -1;
int ClassifierBoost::learnerCount = 1000;
int ClassifierBoost::svmCount = 2;
void ClassifierBoost::InitLearners(fvec xMin, fvec xMax)
{
    srand(1); // fixed seed, so that we always generate the same weak learners
    switch(weakType)
    {
    case 0: // projections
        learnerCount = dim>2 ? 1000 : 360;
        break;
    case 1: // rectangles
    case 2: // circles
        learnerCount = 1000;
        break;
    case 3: // gmm
    case 4: // svm
        learnerCount = 3000;
        break;
    }
    learnerCount = max(learnerCount, (int)weakCount);

    learners.clear();
    learners.resize(learnerCount);
    // we generate a pool of random learners
    switch(weakType)
    {
    case 0: // random projection
    {
        if(dim==2)
        {
            FOR(i, learnerCount)
            {
                learners[i].resize(dim);
                float theta = i / (float)learnerCount * PIf;
                learners[i][0] = cosf(theta);
                learners[i][1] = sinf(theta);
            }
        }
        else
        {
            FOR(i, learnerCount)
            {
                learners[i].resize(dim);
                fvec projection(dim,0);
                float norm = 0;
                FOR(d, dim)
                {
                    projection[d] = drand48();
                    norm += projection[d];
                }
                // normalize by the component sum (the directions all lie in the positive orthant)
                FOR(d, dim) learners[i][d] = projection[d] / norm;
            }
        }
    }
        break;
    case 2: // random circle
    {
        if(dim==2)
        {
            FOR(i, learnerCount)
            {
                learners[i].resize(dim);
                learners[i][0] = drand48()*(xMax[0]-xMin[0]) + xMin[0];
                learners[i][1] = drand48()*(xMax[1]-xMin[1]) + xMin[1];
            }
        }
        else
        {
            FOR(i, learnerCount)
            {
                learners[i].resize(dim);
                FOR(d, dim) learners[i][d] = drand48()*(xMax[d]-xMin[d]) + xMin[d];
            }
        }
    }
        break;
    case 1: // random rectangle
    {
        FOR(i, learnerCount)
        {
            learners[i].resize(dim*2);
            FOR(d, dim)
            {
                float x = drand48()*(xMax[d] - xMin[d]) + xMin[d]; // rectangle lower corner (the test spans [x, x+l])
                float l = drand48()*(xMax[d] - xMin[d]); // width
                learners[i][2*d] = x;
                learners[i][2*d+1] = l;
            }
        }
    }
        break;
    case 3: // random GMM
    {
        FOR(i, learnerCount)
        {
            learners[i].resize(dim + dim*(dim+1)/2); // a center plus a covariance matrix
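            // layout (matching the indexing below): the first dim entries are the center mu,
            // followed by the lower triangle of the symmetric matrix C stored row by row;
            // e.g. for dim=2 the learner is [mu0, mu1, C00, C10, C11], and entry (d1,d2)
            // with d1 >= d2 lives at dim + d1*(d1+1)/2 + d2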
            // we generate a random center
            FOR(d, dim)
            {
                learners[i][d] = drand48()*(xMax[d] - xMin[d]) + xMin[d];
            }
            // we generate a random covariance matrix
            float minLambda = (xMax[0]-xMin[0])*0.01f; // minimum eigenvalue: 1% of the first dimension's span
            fvec C = RandCovMatrix(dim, minLambda);
            FOR(d1, dim)
            {
                FOR(d2, d1+1)
                {
                    int index = d1*(d1+1)/2 + d2; // index in triangular matrix form
                    learners[i][dim + index] = C[d1*dim + d2];
                }
            }
        }
    }
        break;
    case 4: // random SVM
    {
        FOR(i, learnerCount)
        {
            learners[i].resize(1 + svmCount*(dim+1)); // a kernel width plus svmCount points plus svmCount alphas
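            // layout (matching the indexing below): [gamma, alpha_0, sv_0(dim), alpha_1, sv_1(dim), ...];
            // alpha_k sits at 1 + k*(dim+1) and the coordinates of sv_k start right after it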
            learners[i][0] = 1.f / (drand48()*(xMax[0]-xMin[0])); // kernel width inversely proportional to the data span
            float sumAlpha=0;
            FOR(j, svmCount)
            {
                // we generate a random alpha
                if(j<svmCount-1) sumAlpha += (learners[i][1+(dim+1)*j] = drand48()*2.f - 1.f);
                else learners[i][1+(dim+1)*j] = -sumAlpha; // we ensure that the sum of all alphas is zero
                // and the coordinates of the support vector
                FOR(d, dim)
                {
                    learners[i][1+(dim+1)*j+1 + d] = drand48()*(xMax[d]-xMin[d])+xMin[d];
                }
            }
        }
    }
        break;
    }
    currentLearnerType = weakType;
}

void ClassifierBoost::Train( std::vector< fvec > samples, ivec labels )
{
    if(model) model->clear();
    u32 sampleCnt = samples.size();
    if(!sampleCnt) return;
    DEL(model);
    dim = samples[0].size();
    u32 *perm = randPerm(sampleCnt);
    this->samples = samples;
    this->labels = labels;

    // we need to find the boundaries
    fvec xMin(dim, FLT_MAX), xMax(dim, -FLT_MAX);
    FOR(i, samples.size())
    {
        FOR(d, dim)
        {
            if(xMin[d] > samples[i][d]) xMin[d] = samples[i][d];
            if(xMax[d] < samples[i][d]) xMax[d] = samples[i][d];
        }
    }

    // we need to regenerate the learners
    if(currentLearnerType != weakType || (int)learners.size() != learnerCount) InitLearners(xMin, xMax);

    CvMat *trainSamples = cvCreateMat(sampleCnt, learnerCount, CV_32FC1);
    CvMat *trainLabels = cvCreateMat(labels.size(), 1, CV_32FC1);
    CvMat *sampleWeights = cvCreateMat(samples.size(), 1, CV_32FC1);

    switch(weakType)
    {
    case 0: // random projection
    {
        if(dim == 2)
        {
            FOR(i, sampleCnt)
            {
                fvec sample = samples[perm[i]];
                FOR(j, learnerCount)
                {
                    float val = sample[0]*learners[j][0] + sample[1]*learners[j][1];
                    cvSetReal2D(trainSamples, i, j, val);
                }
                cvSet1D(trainLabels, i, cvScalar((float)labels[perm[i]]));
                cvSet1D(sampleWeights, i, cvScalar(1));
            }
        }
        else
        {
            FOR(i, sampleCnt)
            {
                // project the sample in the direction of the learner
                fvec sample = samples[perm[i]];
                FOR(j, learnerCount)
                {
                    float val = 0;
                    FOR(d, dim) val += sample[d] * learners[j][d];
                    cvSetReal2D(trainSamples, i, j, val);
                }
                cvSet1D(trainLabels, i, cvScalar((float)labels[perm[i]]));
                cvSet1D(sampleWeights, i, cvScalar(1));
            }
        }
    }
        break;
    case 2: // random circle
    {
        if(dim == 2)
        {
            FOR(i, sampleCnt)
            {
                fvec sample = samples[perm[i]];
                FOR(j, learnerCount)
                {
                    // distance between the sample and the circle center
                    float val = sqrtf((sample[0] - learners[j][0])*(sample[0] - learners[j][0])+
                        (sample[1] - learners[j][1])*(sample[1] - learners[j][1]));
                    cvSetReal2D(trainSamples, i, j, val);
                }
                cvSet1D(trainLabels, i, cvScalar((float)labels[perm[i]]));
                cvSet1D(sampleWeights, i, cvScalar(1));
            }
        }
        else
        {
            FOR(i, sampleCnt)
            {
                // distance between the sample and the circle center
                fvec sample = samples[perm[i]];
                FOR(j, learnerCount)
                {
                    float val = 0;
                    FOR(d, dim) val += (sample[d] - learners[j][d])*(sample[d] - learners[j][d]);
                    val = sqrtf(val);
                    cvSetReal2D(trainSamples, i, j, val);
                }
                cvSet1D(trainLabels, i, cvScalar((float)labels[perm[i]]));
                cvSet1D(sampleWeights, i, cvScalar(1));
            }
        }
    }
        break;
    case 1: // random rectangles
    {
        FOR(i, sampleCnt)
        {
            // check if the sample is inside the rectangle generated by the learner
            const fvec sample = samples[perm[i]];
            FOR(j, learnerCount)
            {
                float val = 1;
                FOR(d, dim)
                {
                    if(sample[d] < learners[j][2*d] || sample[d] > learners[j][2*d]+learners[j][2*d+1])
                    {
                        val = 0;
                        break;
                    }
                }
                cvSetReal2D(trainSamples, i, j, val + drand48()*0.1); // add a small noise to the value, just to not have only 0s and 1s
            }
            cvSet1D(trainLabels, i, cvScalar((float)labels[perm[i]]));
            cvSet1D(sampleWeights, i, cvScalar(1));
        }
    }
        break;
    case 3: // random GMM
    {
        FOR(i, sampleCnt)
        {
            fvec sample = samples[perm[i]];
            FOR(j, learnerCount)
            {
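                // the learner response is the quadratic form (x-mu)^T C (x-mu): an anisotropic
                // squared distance from the random center under the random matrix C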
                fvec &gmm = learners[j];
                float val = 0;
                fvec x(dim);
                FOR(d, dim) x[d] = sample[d]-gmm[d];
                FOR(d, dim)
                {
                    float xC = 0;
                    FOR(d1, dim)
                    {
                        int index = d1>d ? d1*(d1+1)/2 + d : d*(d+1)/2 + d1;
                        xC += x[d1]*gmm[dim+index];
                    }
                    val += xC*x[d];
                }
                cvSetReal2D(trainSamples, i, j, val);
            }
            cvSet1D(trainLabels, i, cvScalar((float)labels[perm[i]]));
            cvSet1D(sampleWeights, i, cvScalar(1));
        }
    }
        break;
    case 4: // random SVM
    {
        FOR(i, sampleCnt)
        {
            // compute the svm function
            fvec sample = samples[perm[i]];
            FOR(j, learnerCount)
            {
                fvec &svm = learners[j];
                float val = 0;
                float gamma = svm[0];
                FOR(k, svmCount)
                {
                    float alpha = svm[1+k*(dim+1)];
                    // compute the RBF kernel between the sample and the k-th support vector
                    float K = 0;
                    int index = 1+k*(dim+1)+1;
                    FOR(d, dim)
                    {
                        float dist = sample[d]-svm[index+d];
                        K += dist*dist;
                    }
                    K *= gamma;
                    val += alpha*expf(-K);
                }
                cvSetReal2D(trainSamples, i, j, val);
            }
            cvSet1D(trainLabels, i, cvScalar((float)labels[perm[i]]));
            cvSet1D(sampleWeights, i, cvScalar(1));
        }
    }
        break;
    }

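    // at this point trainSamples is a sampleCnt x learnerCount matrix holding every weak
    // learner's response on every sample; CvBoost is trained below on these columns with
    // single-split trees (maxSplit = 1), so each boosting round effectively picks one learner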
    CvMat *varType = cvCreateMat(trainSamples->width+1, 1, CV_8UC1);
    FOR(i, trainSamples->width)
    {
        CV_MAT_ELEM(*varType, u8, i, 0) = CV_VAR_NUMERICAL;
    }
    CV_MAT_ELEM(*varType, u8, trainSamples->width, 0) = CV_VAR_CATEGORICAL;

    int maxSplit = 1;
    CvBoostParams params(boostType, weakCount, 0.95, maxSplit, false, NULL);
    params.split_criteria = CvBoost::DEFAULT;
    model = new CvBoost();
    model->train(trainSamples, CV_ROW_SAMPLE, trainLabels, NULL, NULL, varType, NULL, params);

    // each stump's split variable tells us which learner was selected
    CvSeq *predictors = model->get_weak_predictors();
    int length = cvSliceLength(CV_WHOLE_SEQ, predictors);
    features.clear();
    FOR(i, length)
    {
        CvBoostTree *predictor = *CV_SEQ_ELEM(predictors, CvBoostTree*, i);
        CvDTreeSplit *split = predictor->get_root()->split;
        if(!split) continue;
        features.push_back(split->var_idx);
    }

    scoreMultiplier = 1.f;
    float maxScore=-FLT_MAX, minScore=FLT_MAX;
    FOR(i, samples.size())
    {
        float score = Test(samples[i]);
        if(score > maxScore) maxScore = score;
        if(score < minScore) minScore = score;
    }
    if(minScore != maxScore)
    {
        scoreMultiplier = 5.f / max(fabs((double)maxScore), fabs((double)minScore));
    }
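    // note: scoreMultiplier rescales the raw boosting sum so that the largest training score
    // has magnitude 5, presumably to keep the confidence values in a convenient display range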

    // we want to compute the error weight for each training sample
    vector<fvec> responses(length);
    FOR(i, length) responses[i].resize(sampleCnt);
    // first we compute all the responses, for each learner
    FOR(i, sampleCnt)
    {
        fvec response(length);
        Test(samples[i], &response);
        FOR(j, length) responses[j][i] = response[j];
    }
    // then we iterate through the learners
    errorWeights = fvec(sampleCnt, 1.f);
    FOR(i, responses.size())
    {
        double sum = 0;
        // we compute the current weighted error
        FOR(j, sampleCnt)
        {
            double response = responses[i][j];
            if((response < 0 && labels[j] == 1) || (response >= 0 && labels[j] != 1)) sum += fabs(response)*errorWeights[j]/sampleCnt;
        }
        if(sum == 0) continue; // this learner misclassifies nothing, so there is nothing to reweight
        double c = sqrt(fabs((1-sum)/sum));

        // we update the individual weights
        sum = 0;
        FOR(j, sampleCnt)
        {
            double response = responses[i][j];
            if((response < 0 && labels[j] == 1) || (response >= 0 && labels[j] != 1)) errorWeights[j] *= c;
            else errorWeights[j] *= 1.f/c;
            sum += errorWeights[j];
        }
        sum /= sampleCnt;
        // and we renormalize them
        FOR(j, sampleCnt) errorWeights[j] /= sum;
    }

    cvReleaseMat(&trainSamples);
    cvReleaseMat(&trainLabels);
    cvReleaseMat(&sampleWeights);
    cvReleaseMat(&varType);
    delete [] perm;
    trainSamples = 0;
    trainLabels = 0;
    sampleWeights = 0;
    varType = 0;
}

float ClassifierBoost::Test( const fvec &sample )
{
    return Test(sample, 0);
}

float ClassifierBoost::Test( const fvec &sample, fvec *responses )
{
    if(!model) return 0;
    if(!learners.size()) return 0;

    CvMat *x = cvCreateMat(1, learners.size(), CV_32FC1);
    switch(weakType)
    {
    case 0: // random projection
    {
        if(dim == 2)
        {
            FOR(i, features.size())
            {
                float val = sample[0] * learners[features[i]][0] + sample[1] * learners[features[i]][1];
                cvSetReal2D(x, 0, features[i], val);
            }
        }
        else
        {
            FOR(i, features.size())
            {
                float val = sample * learners[features[i]];
                cvSetReal2D(x, 0, features[i], val);
            }
        }
    }
        break;
    case 2: // random circle
    {
        if(dim == 2)
        {
            FOR(i, features.size())
            {
                float val = sqrtf((sample[0] - learners[features[i]][0])*(sample[0] - learners[features[i]][0])+
                    (sample[1] - learners[features[i]][1])*(sample[1] - learners[features[i]][1]));
                cvSetReal2D(x, 0, features[i], val);
            }
        }
        else
        {
            FOR(i, features.size())
            {
                float val = 0;
                FOR(d, dim) val += (sample[d] - learners[features[i]][d])*(sample[d] - learners[features[i]][d]);
                val = sqrtf(val);
                cvSetReal2D(x, 0, features[i], val);
            }
        }
    }
        break;
    case 1: // random rectangle
    {
        FOR(i, features.size())
        {
            int val = 1;
            FOR(d, dim)
            {
                if(sample[d] < learners[features[i]][2*d] ||
                    sample[d] > learners[features[i]][2*d]+learners[features[i]][2*d+1])
                {
                    val = 0;
                    break;
                }
            }
            cvSetReal2D(x, 0, features[i], val + drand48()*0.1); // same small noise as in training
        }
    }
        break;
    case 3: // random GMM
    {
        FOR(i, features.size())
        {
            float val = 0;
            fvec &gmm = learners[features[i]];
            fvec xt(dim);
            FOR(d, dim) xt[d] = sample[d]-gmm[d];
            FOR(d, dim)
            {
                float xC = 0;
                FOR(d1, dim)
                {
                    int index = d1>d ? d1*(d1+1)/2 + d : d*(d+1)/2 + d1;
                    xC += xt[d1]*gmm[dim+index];
                }
                val += xC*xt[d];
            }
            cvSetReal2D(x, 0, features[i], val);
        }
    }
        break;
    case 4: // random SVM
    {
        FOR(i, features.size())
        {
            fvec &svm = learners[features[i]];
            float val = 0;
            float gamma = svm[0];
            FOR(k, svmCount)
            {
                float alpha = svm[1+k*(dim+1)];
                // compute the RBF kernel between the sample and the k-th support vector
                float K = 0;
                int index = 1+k*(dim+1)+1;
                FOR(d, dim)
                {
                    float dist = sample[d]-svm[index+d];
                    K += dist*dist;
                }
                K *= gamma;
                val += alpha*expf(-K);
            }
            cvSetReal2D(x, 0, features[i], val);
        }
    }
        break;
    }

    // allocate memory for the weak learner responses
    int length = cvSliceLength(CV_WHOLE_SEQ, model->get_weak_predictors());
    CvMat *weakResponses = cvCreateMat(length, 1, CV_32FC1);
    model->predict(x, NULL, weakResponses, CV_WHOLE_SEQ);

    if(responses != NULL)
    {
        responses->resize(length);
        FOR(i, length) (*responses)[i] = cvGet1D(weakResponses, i).val[0];
    }
    float score = cvSum(weakResponses).val[0] * scoreMultiplier;

    cvReleaseMat(&weakResponses);
    cvReleaseMat(&x);
    return score;
}

void ClassifierBoost::SetParams( u32 weakCount, int weakType, int boostType, int svmCount )
{
    this->weakCount = weakCount;
    this->weakType = weakType;
    this->boostType = boostType;
    if(this->svmCount != svmCount)
    {
        // if we changed the number of support vectors per SVM we need to regenerate the learners
        this->svmCount = svmCount;
        if(weakType == 4) currentLearnerType = -1;
    }
}

const char *ClassifierBoost::GetInfoString()
{
    char *text = new char[1024];
    // append via an offset pointer: sprintf-ing a buffer into itself is undefined behavior
    char *ptr = text;
    ptr += sprintf(ptr, "Boosting\n");
    ptr += sprintf(ptr, "Learners Count: %d\n", weakCount);
    ptr += sprintf(ptr, "Learners Type: ");
    switch(weakType)
    {
    case 0:
        ptr += sprintf(ptr, "Random Projections\n");
        break;
    case 1:
        ptr += sprintf(ptr, "Random Rectangles\n");
        break;
    case 2:
        ptr += sprintf(ptr, "Random Circles\n");
        break;
    case 3:
        ptr += sprintf(ptr, "Random GMM\n");
        break;
    case 4:
        ptr += sprintf(ptr, "Random SVM %d\n", svmCount);
        break;
    }
    return text;
}
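
For reference, a minimal usage sketch of the plugin class, assuming the MLDemos fvec/ivec
typedefs and the weak-learner type codes used in the switches above (0 projections,
1 rectangles, 2 circles, 3 GMM, 4 SVM); the driver itself is hypothetical:

// Hypothetical driver (not part of the plugin): train on toy 2D data with
// random-SVM weak learners, then query the signed score of one sample.
ClassifierBoost classifier;
classifier.SetParams(/*weakCount=*/50, /*weakType=*/4, CvBoost::GENTLE, /*svmCount=*/2);

std::vector<fvec> samples;
ivec labels;
for(int i = 0; i < 100; ++i)
{
    fvec x(2);
    x[0] = drand48();
    x[1] = drand48();
    samples.push_back(x);
    labels.push_back(x[0] + x[1] > 1.f ? 1 : 0); // simple linear ground truth
}
classifier.Train(samples, labels);
float score = classifier.Test(samples[0]); // rescaled by scoreMultiplier; the sign gives the class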