001/**
002 * Copyright (c) 2011, The University of Southampton and the individual contributors.
003 * All rights reserved.
004 *
005 * Redistribution and use in source and binary forms, with or without modification,
006 * are permitted provided that the following conditions are met:
007 *
008 *   *  Redistributions of source code must retain the above copyright notice,
009 *      this list of conditions and the following disclaimer.
010 *
011 *   *  Redistributions in binary form must reproduce the above copyright notice,
012 *      this list of conditions and the following disclaimer in the documentation
013 *      and/or other materials provided with the distribution.
014 *
015 *   *  Neither the name of the University of Southampton nor the names of its
016 *      contributors may be used to endorse or promote products derived from this
017 *      software without specific prior written permission.
018 *
019 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
020 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
021 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
022 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
023 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
024 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
025 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
026 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
027 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
028 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
029 */
030package org.openimaj.image.feature.local.interest;
031
032import java.io.IOException;
033import java.util.ArrayList;
034import java.util.List;
035
036import org.apache.log4j.BasicConfigurator;
037import org.apache.log4j.Level;
038import org.apache.log4j.Logger;
039import org.openimaj.image.DisplayUtilities;
040import org.openimaj.image.FImage;
041import org.openimaj.image.ImageUtilities;
042import org.openimaj.image.MBFImage;
043import org.openimaj.image.colour.RGBColour;
044import org.openimaj.image.pixel.FValuePixel;
045import org.openimaj.image.pixel.Pixel;
046import org.openimaj.image.processing.convolution.FConvolution;
047import org.openimaj.image.processing.convolution.FGaussianConvolve;
048import org.openimaj.image.processing.resize.ResizeProcessor;
049import org.openimaj.image.processing.transform.FProjectionProcessor;
050import org.openimaj.math.geometry.point.Point2dImpl;
051import org.openimaj.math.geometry.shape.Ellipse;
052import org.openimaj.math.geometry.shape.EllipseUtilities;
053import org.openimaj.math.geometry.shape.Rectangle;
054import org.openimaj.math.matrix.EigenValueVectorPair;
055import org.openimaj.math.matrix.MatrixUtils;
056
057import Jama.Matrix;
058
059/**
060 * Using an underlying feature detector, adapt the ellipse detected to result in a more
061 * stable region according to work by http://www.robots.ox.ac.uk/~vgg/research/affine/
062 * @author Sina Samangooei (ss@ecs.soton.ac.uk)
063 *
064 */
065public class AffineAdaption implements InterestPointDetector<EllipticInterestPointData>{
	// 3x3 discrete Laplacian kernel; its response is used as the scale-normalised
	// blob measure when selecting the integration scale (see selIntegrationScale).
	private static final FImage LAPLACIAN_KERNEL = new FImage(new float[][] {{2, 0, 2}, {0, -8, 0}, {2, 0, 2}});
	private static final FConvolution LAPLACIAN_KERNEL_CONV = new FConvolution(LAPLACIAN_KERNEL);
//	private static final FImage DX_KERNEL = new FImage(new float[][] {{-1, 0, 1}});
//	private static final FImage DY_KERNEL = new FImage(new float[][] {{-1}, {0}, {1}});
	
	static Logger logger = Logger.getLogger(AffineAdaption.class);
	static{
		
		BasicConfigurator.configure();
		logger.setLevel(Level.INFO);
	}

	// Detector cloned and re-run inside the per-point adaptation loop.
	private AbstractStructureTensorIPD  internal;
	// Detector used for the initial detection pass over the whole image.
	private AbstractStructureTensorIPD  initial;
	// Points whose affine-adaptation loop converged (result of findInterestPoints).
	private List<EllipticInterestPointData> points;

	// Strategy for picking candidate points from the initial detector.
	private IPDSelectionMode initialMode;

	// When true, selDifferentiationScaleFast is used instead of the full search.
	private boolean fastDifferentiationScale = false;
085        
086        /**
087         * instatiate with a {@link HarrisIPD} detector with scales 2.0f and 2.0f * 1.4f. Selection 100 points
088         */
	public AffineAdaption(){
		// Defaults: Harris detector (sd = 2.0, si = 2.0 * 1.4) keeping the 100 strongest points.
		this(new HarrisIPD(2.0f,2.0f*1.4f),new IPDSelectionMode.Count(100));
	}
092        
093        /**
094         * specify the internal detector and the selection mode
095         * @param internal
096         * @param initialSelectionMode
097         */
	public AffineAdaption(AbstractStructureTensorIPD internal, IPDSelectionMode initialSelectionMode){
		// The initial detector is an independent clone of the internal one so
		// the two passes do not share mutable detector state.
		this(internal,internal.clone(),initialSelectionMode);
	}
101        
102        /**
103         * set both the internal and intitial feature detectors (possibly different settings of the same detector) and a selection mode
104         * @param internal
105         * @param initial
106         * @param initialSelectionMode
107         */
	public AffineAdaption(AbstractStructureTensorIPD internal, AbstractStructureTensorIPD initial,IPDSelectionMode initialSelectionMode){
		// Detector used (cloned) inside the per-point adaptation loop.
		this.internal = internal;
		// Detector used for the initial detection pass.
		this.initial = initial;
		
		this.initialMode = initialSelectionMode;
		this.points = new ArrayList<EllipticInterestPointData>();
	}
115        
116        @Override
117        public void findInterestPoints(FImage image){
118                findInterestPoints(image, image.getBounds());
119        }
120        
121        @Override
122        public void findInterestPoints(FImage image, Rectangle window) {
123                this.points  = new ArrayList<EllipticInterestPointData>();
124                initial.findInterestPoints(image,window);
125//              if(logger.getLevel() == Level.INFO)
126//                      initial.printStructureTensorStats();
127                List<InterestPointData> a = initialMode.selectPoints(initial);
128                
129                logger.info("Found " + a.size() + " features at sd/si: " + initial.detectionScale + "/" + initial.integrationScale);
130                
131                for (InterestPointData d : a) {
132                        EllipticInterestPointData kpt = new EllipticInterestPointData();
133//                      InterestPointData d = new InterestPointData();
134//                      d.x = 102;
135//                      d.y = 396;
136                        kpt.scale = initial.getIntegrationScale();
137                        kpt.x = d.x;
138                        kpt.y = d.y;
139                        
140                        boolean converge = calcAffineAdaptation(image, kpt, internal.clone());
141                        if(converge)
142                        {
143                                logger.debug("Keypoint at: " + d.x + ", " + d.y);
144                                logger.debug("... converged: "+ converge);
145                                this.points.add(kpt);
146                        }
147                        
148                }
149        }
150        
151        @Override
152        public List<EllipticInterestPointData> getInterestPoints(int npoints) {
153                if(points == null) return null;
154                if(npoints < 0) npoints = this.points.size();
155                return this.points.subList(0, npoints < this.points.size() ? npoints : this.points.size());
156        }
157
158        @Override
159        public List<EllipticInterestPointData> getInterestPoints(float threshold) {
160                List<EllipticInterestPointData> validPoints = new ArrayList<EllipticInterestPointData>();
161                for(EllipticInterestPointData  point : this.points){
162                        if(point.score > threshold){
163                                validPoints.add(point);
164                        }
165                }
166                return validPoints;
167        }
168
	@Override
	public List<EllipticInterestPointData> getInterestPoints() {
		// Returns the live internal list (not a copy) of converged points.
		return this.points;
	}
173
	@Override
	public void setDetectionScale(float detectionScaleVariance) {
		// Only the initial detector is updated; the internal detector's
		// differentiation scale is re-selected per point during adaptation
		// (see selDifferentiationScale), which is presumably why updating it
		// here was commented out.
		this.initial.setDetectionScale(detectionScaleVariance);
//		this.internal.setDetectionScaleVariance(detectionScaleVariance);
	}
179        
	@Override
	public void setIntegrationScale(float integrationScaleVariance) {
		// Applied to the initial detector only; the per-point integration
		// scale is re-selected during adaptation (see selIntegrationScale).
		this.initial.setIntegrationScale(integrationScaleVariance);
	}
184        
185        /*
186         * Calculates second moments matrix in point p
187         */
188//      Matrix calcSecondMomentMatrix(final FImage dx2, final FImage dxy, final FImage dy2, Pixel p) {
189//              int x = p.x;
190//              int y = p.y;
191//
192//              Matrix M = new Matrix(2, 2);
193//              M.set(0, 0, dx2.pixels[y][x]);
194//              M.set(0, 1, dxy.pixels[y][x]);
195//              M.set(1, 0, dxy.pixels[y][x]);
196//              M.set(1, 1, dy2.pixels[y][x]);
197//              
198//              return M;
199//      }
200        Matrix calcSecondMomentMatrix(AbstractStructureTensorIPD ipd, int x, int y) {
201                
202                return ipd.getSecondMomentsAt(x, y);
203        }
204
205        /*
206         * Performs affine adaptation
207         */
	/**
	 * Iteratively estimate an affine-normalising transform U for the region
	 * around a keypoint: warp a patch by the current U, re-select integration
	 * and differentiation scales, re-localise the point, measure the
	 * second-moment matrix and fold its square root back into U, repeating
	 * until the region's eigenvalue ratio is nearly isotropic.
	 *
	 * @param fimage the image being analysed
	 * @param kpt the keypoint to adapt; x, y, scale, transform and score are
	 *            updated in place when the iteration converges
	 * @param ipd structure-tensor detector used to measure second moments in
	 *            the warped patch (mutated; callers pass a clone)
	 * @return true if the iteration converged, false on divergence or once
	 *         the maximum of 10 iterations is exhausted
	 */
	boolean calcAffineAdaptation(final FImage fimage, EllipticInterestPointData kpt, AbstractStructureTensorIPD ipd) {
//		DisplayUtilities.createNamedWindow("warp", "Warped Image ROI",true);
		Matrix transf = new Matrix(2, 3);	// Transformation matrix
		Point2dImpl c = new Point2dImpl();	// Transformed point
		Point2dImpl p = new Point2dImpl();	// Image point

		Matrix U = Matrix.identity(2, 2);	// Normalization matrix

		Matrix Mk = U.copy(); 
		FImage img_roi, warpedImg = new FImage(1,1);
		float Qinv = 1, q, si = kpt.scale; //sd = 0.75f * si;
		float kptSize = 2 * 3 * kpt.scale;
		boolean divergence = false, convergence = false;
		int i = 0;

		//Coordinates in image
		int py = (int) kpt.y;
		int px = (int) kpt.x;

		//Roi coordinates
		int roix, roiy;

		//Coordinates in U-trasformation
		int cx = px;
		int cy = py;
		int cxPr = cx;
		int cyPr = cy;

		float radius = kptSize / 2 * 1.4f;
		float half_width, half_height;

		Rectangle roi;

		//Affine adaptation: at most 10 refinement iterations
		while (i <= 10 && !divergence && !convergence)
		{
			//Transformation matrix (2x2 normalisation U embedded in a 2x3 affine)
			MatrixUtils.zero(transf);
			transf.setMatrix(0, 1, 0, 1, U);
			
			kpt.setTransform(U);

			Rectangle boundingBox = new Rectangle();

			double ac_b2 = U.det();
			boundingBox.width = (float) Math.ceil(U.get(1, 1)/ac_b2  * 3 * si*1.4 );
			boundingBox.height = (float) Math.ceil(U.get(0, 0)/ac_b2 * 3 * si*1.4 );

			//Create window around interest point, clamped to the image borders
			half_width = Math.min((float) Math.min(fimage.width - px-1, px), boundingBox.width);
			half_height = Math.min((float) Math.min(fimage.height - py-1, py), boundingBox.height);
			
			// Point is too close to the image edge to extract a window
			if (half_width <= 0 || half_height <= 0) return divergence;
			
			roix = Math.max(px - (int) boundingBox.width, 0);
			roiy = Math.max(py - (int) boundingBox.height, 0);
			roi = new Rectangle(roix, roiy, px - roix + half_width+1, py - roiy + half_height+1);

			//create ROI
			img_roi = fimage.extractROI(roi);

			//Point within the ROI
			p.x = px - roix;
			p.y = py - roiy;

			//Find coordinates of square's angles to find size of warped ellipse's bounding box
			float u00 = (float) U.get(0, 0);
			float u01 = (float) U.get(0, 1);
			float u10 = (float) U.get(1, 0);
			float u11 = (float) U.get(1, 1);

			float minx = u01 * img_roi.height < 0 ? u01 * img_roi.height : 0;
			float miny = u10 * img_roi.width < 0 ? u10 * img_roi.width : 0;
			float maxx = (u00 * img_roi.width > u00 * img_roi.width + u01 * img_roi.height ? u00
					* img_roi.width : u00 * img_roi.width + u01 * img_roi.height) - minx;
			float maxy = (u11 * img_roi.width > u10 * img_roi.width + u11 * img_roi.height ? u11
					* img_roi.height : u10 * img_roi.width + u11 * img_roi.height) - miny;

			//Shift so the warped ROI has non-negative coordinates
			transf.set(0, 2, -minx);
			transf.set(1, 2, -miny);

			if (maxx >=  2*radius+1 && maxy >=  2*radius+1)
			{
				//Size of normalized window must be 2*radius
				//Transformation: warp the ROI by the affine transform
				FImage warpedImgRoi;
				FProjectionProcessor proc = new FProjectionProcessor();
				proc.setMatrix(transf);
				img_roi.accumulateWith(proc);
				warpedImgRoi = proc.performProjection(0, (int)maxx, 0, (int)maxy, null);

//				DisplayUtilities.displayName(warpedImgRoi.clone().normalise(), "warp");
				
				//Point in U-Normalized coordinates
				c = p.transform(U);
				cx = (int) (c.x - minx);
				cy = (int) (c.y - miny);
				
				
				


				if (warpedImgRoi.height > 2 * radius+1 && warpedImgRoi.width > 2 * radius+1)
				{
					//Cut around normalized patch
					roix = (int) Math.max(cx - Math.ceil(radius), 0.0);
					roiy = (int) Math.max(cy - Math.ceil(radius), 0.0);
					roi = new Rectangle(roix, roiy,
							cx - roix + (float)Math.min(Math.ceil(radius), warpedImgRoi.width - cx-1)+1,
							cy - roiy + (float)Math.min(Math.ceil(radius), warpedImgRoi.height - cy-1)+1);
					warpedImg = warpedImgRoi.extractROI(roi);

					//Coordinates in cutted ROI
					cx = cx - roix;
					cy = cy - roiy;
				} else {
					warpedImg.internalAssign(warpedImgRoi);
				}
				
				// Debug visualisation of the warped/unwarped patches
				if(logger.getLevel() == Level.DEBUG){
					displayCurrentPatch(img_roi.clone().normalise(),p.x,p.y,warpedImg.clone().normalise(),cx,cy,U,si*3);
				}
				
				//Integration Scale selection
				si = selIntegrationScale(warpedImg, si, new Pixel(cx, cy));

				//Differentation scale selection
				if(fastDifferentiationScale ){
					ipd = selDifferentiationScaleFast(warpedImg, ipd, si, new Pixel(cx, cy));
				}
				else{
					ipd = selDifferentiationScale(warpedImg, ipd, si, new Pixel(cx, cy));
				}
				
				// No maxima found in the warped patch: the point is lost
				if(ipd.maxima.size() == 0){
					divergence = true;
					continue;
				}
				//Spatial Localization
				cxPr = cx; //Previous iteration point in normalized window
				cyPr = cy;
//
//				float cornMax = 0;
//				for (int j = 0; j < 3; j++)
//				{
//					for (int t = 0; t < 3; t++)
//					{
//						float dx2 = Lxm2smooth.pixels[cyPr - 1 + j][cxPr - 1 + t];
//						float dy2 = Lym2smooth.pixels[cyPr - 1 + j][cxPr - 1 + t];
//						float dxy = Lxmysmooth.pixels[cyPr - 1 + j][cxPr - 1 + t];
//						float det = dx2 * dy2 - dxy * dxy;
//						float tr = dx2 + dy2;
//						float cornerness = (float) (det - (0.04 * Math.pow(tr, 2)));
//						
//						if (cornerness > cornMax) {
//							cornMax = cornerness;
//							cx = cxPr - 1 + t;
//							cy = cyPr - 1 + j;
//						}
//					}
//				}
				
				// Re-localise on the strongest response in the 3x3 neighbourhood
				FValuePixel max = ipd.findMaximum(new Rectangle(cxPr - 1, cyPr -1, 3, 3));
				cx = max.x;
				cy = max.y;

				//Transform point in image coordinates
				p.x = px;
				p.y = py;
				
				//Displacement vector
				c.x = cx - cxPr;
				c.y = cy - cyPr;
				
				//New interest point location in image (displacement mapped back via U^-1)
				p.translate(c.transform(U.inverse()));
				px = (int) p.x;
				py = (int) p.y;

				// q = min/max eigenvalue ratio; Mk receives sqrt of the second-moment matrix
				q = calcSecondMomentSqrt(ipd, new Pixel(cx, cy), Mk);

				float ratio = 1 - q;

				//if ratio == 1 means q == 0 and one axes equals to 0
				if (!Float.isNaN(ratio) && ratio != 1)
				{
					//Update U matrix
					U = U.times(Mk);

					Matrix uVal, uV;
//					EigenvalueDecomposition ueig = U.eig(); 
					EigenValueVectorPair ueig = MatrixUtils.symmetricEig2x2(U);
					uVal = ueig.getValues();
					uV = ueig.getVectors();

					Qinv = normMaxEval(U, uVal, uV);

					//Keypoint doesn't converge: eigenvalue ratio too large (edge-like)
					if (Qinv >= 6) {
						logger.debug("QInverse too large, feature too edge like, affine divergence!");
						divergence = true;
					} else if (ratio <= 0.05) { //Keypoint converges
						convergence = true;

						//Set transformation matrix
						MatrixUtils.zero(transf);
						transf.setMatrix(0, 1, 0, 1, U);
						// The order here matters, setTransform uses the x and y to calculate a new ellipse
						kpt.x = px;
						kpt.y = py;
						kpt.scale = si;
						kpt.setTransform(U);
						kpt.score = max.value;

//						ax1 = (float) (1 / Math.abs(uVal.get(1, 1)) * 3 * si);
//						ax2 = (float) (1 / Math.abs(uVal.get(0, 0)) * 3 * si);
//						phi = Math.atan(uV.get(1, 1) / uV.get(0, 1));
//						kpt.axes = new Point2dImpl(ax1, ax2);
//						kpt.phi = phi;
//						kpt.centre = new Pixel(px, py);
//						kpt.si = si;
//						kpt.size = 2 * 3 * si;

					} else {
						// Not converged yet: grow the search radius for the next pass
						radius = (float) (3 * si * 1.4);
					}
				} else {
					logger.debug("QRatio was close to 0, affine divergence!");
					divergence = true;
				}
			} else {
				logger.debug("Window size has grown too fast, scale divergence!");
				divergence = true;
			}

			++i;
		}
		if(!divergence && !convergence){
			logger.debug("Reached max iterations!");
		}
		return convergence;
	}
450
	/**
	 * Debug visualisation: draw the warped (normalised) patch alongside the
	 * unwarped source patch, each upscaled 5x, with the interest point and
	 * its (transformed) ellipse overlaid, in the "warpunwarp" window.
	 */
	private void displayCurrentPatch(FImage unwarped, float unwarpedx, float unwarpedy, FImage warped, int warpedx, int warpedy, Matrix transform, float scale) {
		DisplayUtilities.createNamedWindow("warpunwarp", "Warped and Unwarped Image",true);
		logger.debug("Displaying patch");
		float resizeScale = 5f;
		float warppedPatchScale = resizeScale ;
		ResizeProcessor patchSizer = new ResizeProcessor(resizeScale);
		FImage warppedPatchGrey = warped.process(patchSizer);
		MBFImage warppedPatch = new MBFImage(warppedPatchGrey.clone(),warppedPatchGrey.clone(),warppedPatchGrey.clone());
		// Point and circle in the (scaled) warped patch; the region should be circular here
		float x = warpedx*warppedPatchScale;
		float y = warpedy*warppedPatchScale;
		float r = scale * warppedPatchScale;
		
		warppedPatch.createRenderer().drawShape(new Ellipse(x,y,r,r,0), RGBColour.RED);
		warppedPatch.createRenderer().drawPoint(new Point2dImpl(x,y), RGBColour.RED,3);
		
		FImage unwarppedPatchGrey = unwarped.clone();
		MBFImage unwarppedPatch = new MBFImage(unwarppedPatchGrey.clone(),unwarppedPatchGrey.clone(),unwarppedPatchGrey.clone());
		unwarppedPatch = unwarppedPatch.process(patchSizer);
		float unwarppedPatchScale = resizeScale;
		
		x = unwarpedx * unwarppedPatchScale ;
		y = unwarpedy * unwarppedPatchScale ;
//		Matrix sm = state.selected.secondMoments;
//		float scale = state.selected.scale * unwarppedPatchScale;
//		Ellipse e = EllipseUtilities.ellipseFromSecondMoments(x, y, sm, scale*2);
		// Ellipse in the unwarped frame, derived from the 2x2 transform matrix
		Ellipse e = EllipseUtilities.fromTransformMatrix2x2(transform,x,y,scale*unwarppedPatchScale);
		
		unwarppedPatch.createRenderer().drawShape(e, RGBColour.BLUE);
		unwarppedPatch.createRenderer().drawPoint(new Point2dImpl(x,y), RGBColour.RED,3);
		// give each patch a border (5px each side, black)
		warppedPatch = warppedPatch.padding(5, 5, RGBColour.BLACK);
		unwarppedPatch = unwarppedPatch.padding(5, 5,RGBColour.BLACK);
		
		// Side-by-side composite: warped patch on the left, unwarped on the right
		MBFImage displayArea = warppedPatch.newInstance(warppedPatch.getWidth()*2, warppedPatch.getHeight());
		displayArea.createRenderer().drawImage(warppedPatch, 0, 0);
		displayArea.createRenderer().drawImage(unwarppedPatch, warppedPatch.getWidth(), 0);
		DisplayUtilities.displayName(displayArea, "warpunwarp");
		logger.debug("Done");	
	}
490
491        /*
492         * Selects the integration scale that maximize LoG in point c
493         */
494        float selIntegrationScale(final FImage image, float si, Pixel c) {
495                FImage L;
496                int cx = c.x;
497                int cy = c.y;
498                float maxLap = -Float.MAX_VALUE;
499                float maxsx = si;
500                float sigma, sigma_prev = 0;
501
502                L = image.clone();
503                /* 
504                 * Search best integration scale between previous and successive layer
505                 */
506                for (float u = 0.7f; u <= 1.41; u += 0.1)
507                {
508                        float sik = u * si;
509                        sigma = (float) Math.sqrt(Math.pow(sik, 2) - Math.pow(sigma_prev, 2));
510
511                        L.processInplace(new FGaussianConvolve(sigma, 3));
512                        
513                        sigma_prev = sik;
514//                      Lap = L.process(LAPLACIAN_KERNEL_CONV);
515
516                        float lapVal = sik * sik * Math.abs(LAPLACIAN_KERNEL_CONV.responseAt(cx,cy,L));
517//                      float lapVal = sik * sik * Math.abs(Lap.pixels[cy][cx]);
518
519                        if (lapVal >= maxLap)
520                        {
521                                maxLap = lapVal;
522                                maxsx = sik;
523                        }
524                }
525                return maxsx;
526        }
527
528        /*
529         * Calculates second moments matrix square root
530         */
531        float calcSecondMomentSqrt(AbstractStructureTensorIPD ipd, Pixel p, Matrix Mk)
532        {
533                Matrix M, V, eigVal, Vinv;
534
535                M = calcSecondMomentMatrix(ipd, p.x, p.y);
536
537                /* *
538                 * M = V * D * V.inv()
539                 * V has eigenvectors as columns
540                 * D is a diagonal Matrix with eigenvalues as elements
541                 * V.inv() is the inverse of V
542                 * */
543//              EigenvalueDecomposition meig = M.eig();
544                EigenValueVectorPair meig = MatrixUtils.symmetricEig2x2(M);
545                eigVal = meig.getValues();
546                V = meig.getVectors();
547                
548//              V = V.transpose();
549                Vinv = V.inverse();
550
551                double eval1 = Math.sqrt(eigVal.get(0, 0));
552                eigVal.set(0, 0, eval1);
553                double eval2 = Math.sqrt(eigVal.get(1, 1));
554                eigVal.set(1, 1, eval2);
555
556                //square root of M
557                Mk.setMatrix(0, 1, 0, 1, V.times(eigVal).times(Vinv));
558                
559                //return q isotropic measure
560                return (float) (Math.min(eval1, eval2) / Math.max(eval1, eval2));
561        }
562
563        float normMaxEval(Matrix U, Matrix uVal, Matrix uVec) {
564                /* *
565                 * Decomposition:
566                 * U = V * D * V.inv()
567                 * V has eigenvectors as columns
568                 * D is a diagonal Matrix with eigenvalues as elements
569                 * V.inv() is the inverse of V
570                 * */
571//              uVec = uVec.transpose();
572                Matrix uVinv = uVec.inverse();
573
574                //Normalize min eigenvalue to 1 to expand patch in the direction of min eigenvalue of U.inv()
575                double uval1 = uVal.get(0, 0);
576                double uval2 = uVal.get(1, 1);
577
578                if (Math.abs(uval1) < Math.abs(uval2))
579                {
580                        uVal.set(0, 0, 1);
581                        uVal.set(1, 1, uval2 / uval1);
582
583                } else
584                {
585                        uVal.set(1, 1, 1);
586                        uVal.set(0, 0, uval1 / uval2);
587                }
588
589                //U normalized
590                U.setMatrix(0,1,0,1,uVec.times(uVal).times(uVinv));
591
592                return (float) (Math.max(Math.abs(uVal.get(0, 0)), Math.abs(uVal.get(1, 1))) / 
593                                Math.min(Math.abs(uVal.get(0, 0)), Math.abs(uVal.get(1, 1)))); //define the direction of warping
594        }
595
596        /*
597         * Selects diffrentiation scale
598         */
	/**
	 * Select the differentiation (derivative) scale that maximises the
	 * isotropy measure q = min(eig)/max(eig) of the second-moment matrix at
	 * c, searching sd in [0.5, 0.75] * si in steps of 0.05 * si.
	 *
	 * @param img the patch to analyse
	 * @param ipdToUse template detector (cloned; the argument is not mutated)
	 * @param si the integration scale
	 * @param c the point at which the structure tensor is evaluated
	 * @return a clone of the detector configured and run at the best scale
	 */
	AbstractStructureTensorIPD selDifferentiationScale(FImage img, AbstractStructureTensorIPD ipdToUse, float si, Pixel c) {
		AbstractStructureTensorIPD best = null;
		float s = 0.5f;
		float sigma_prev = 0, sigma;

		FImage L;

		double qMax = 0;

		// Working copy, blurred incrementally on each pass of the loop.
		L = img.clone();
		
		AbstractStructureTensorIPD ipd = ipdToUse.clone();

		while (s <= 0.751)
		{
			Matrix M;
			float sd = s * si;

			//Smooth previous smoothed image L (incremental sigma reaches sd in total)
			sigma = (float) Math.sqrt(Math.pow(sd, 2) - Math.pow(sigma_prev, 2));

			L.processInplace(new FGaussianConvolve(sigma, 3));
			sigma_prev = sd;


			//X and Y derivatives: re-run the detector at this candidate scale.
			// setImageBlurred(true) tells it L is already smoothed.
			ipd.setDetectionScale(sd);
			ipd.setIntegrationScale(si);
			ipd.setImageBlurred(true);
			
			ipd.findInterestPoints(L);
//			FImage Lx = L.process(new FConvolution(DX_KERNEL.multiply(sd)));
//			FImage Ly = L.process(new FConvolution(DY_KERNEL.multiply(sd)));		
//
//			FGaussianConvolve gauss = new FGaussianConvolve(si, 3);
//			
//			FImage Lxm2 = Lx.multiply(Lx);
//			dx2 = Lxm2.process(gauss);
//			
//			FImage Lym2 = Ly.multiply(Ly);
//			dy2 = Lym2.process(gauss);
//
//			FImage Lxmy = Lx.multiply(Ly);
//			dxy = Lxmy.process(gauss);
			
			M = calcSecondMomentMatrix(ipd, c.x, c.y);

			//calc eigenvalues and the isotropy measure q at this scale
//			EigenvalueDecomposition meig = M.eig();
			EigenValueVectorPair meig = MatrixUtils.symmetricEig2x2(M);
			Matrix eval = meig.getValues();
			double eval1 = Math.abs(eval.get(0, 0));
			double eval2 = Math.abs(eval.get(1, 1));
			double q = Math.min(eval1, eval2) / Math.max(eval1, eval2);

			// Keep the detector state from the most isotropic scale so far
			// (>= so the first iteration always initialises best).
			if (q >= qMax) {
				qMax = q;
				best = ipd.clone();
			}

			s += 0.05;
		}
		return best;
	}
663        
664        AbstractStructureTensorIPD selDifferentiationScaleFast(FImage img, AbstractStructureTensorIPD ipd, float si, Pixel c) {
665                AbstractStructureTensorIPD best = ipd.clone();
666                float s = 0.75f;
667                float sigma;
668                FImage L;
669                L = img.clone();
670                float sd = s * si;
671
672                //Smooth previous smoothed image L
673                sigma = sd;
674
675                L.processInplace(new FGaussianConvolve(sigma, 3));
676                
677                //X and Y derivatives
678                best.setDetectionScale(sd);
679                best.setIntegrationScale(si);
680                best.setImageBlurred(true);
681                
682                best.findInterestPoints(L);
683                
684//              M = calcSecondMomentMatrix(best, c.x, c.y);
685
686//              EigenValueVectorPair meig = MatrixUtils.symmetricEig2x2(M);
687//              Matrix eval = meig.getD();
688//              double eval1 = Math.abs(eval.get(0, 0));
689//              double eval2 = Math.abs(eval.get(1, 1));
690
691                return best;
692        }
693
694//      void calcAffineCovariantRegions(final Matrix image, final vector<KeyPoint> & keypoints,
695//                      vector<Elliptic_KeyPoint> & affRegions, string detector_type)
696//      {
697//
698//              for (size_t i = 0; i < keypoints.size(); ++i)
699//              {
700//                      KeyPoint kp = keypoints[i];
701//                      Elliptic_KeyPoint ex(kp.pt, 0, Size_<float> (kp.size / 2, kp.size / 2), kp.size,
702//                                      kp.size / 6);
703//
704//                      if (calcAffineAdaptation(image, ex))        
705//                              affRegions.push_back(ex);
706//
707//              }
708//              //Erase similar keypoint
709//              float maxDiff = 4;
710//              Matrix colorimg;
711//              for (size_t i = 0; i < affRegions.size(); i++)
712//              {
713//                      Elliptic_KeyPoint kp1 = affRegions[i];
714//                      for (size_t j = i+1; j < affRegions.size(); j++){
715//
716//                              Elliptic_KeyPoint kp2 = affRegions[j];
717//
718//                              if(norm(kp1.centre-kp2.centre)<=maxDiff){
719//                                      double phi1, phi2;
720//                                      Size axes1, axes2;
721//                                      double si1, si2;
722//                                      phi1 = kp1.phi;
723//                                      phi2 = kp2.phi;
724//                                      axes1 = kp1.axes;
725//                                      axes2 = kp2.axes;
726//                                      si1 = kp1.si;
727//                                      si2 = kp2.si;
728//                                      if(Math.abs(phi1-phi2)<15 && Math.max(si1,si2)/Math.min(si1,si2)<1.4 && axes1.width-axes2.width<5 && axes1.height-axes2.height<5){
729//                                              affRegions.erase(affRegions.begin()+j);
730//                                              j--;                        
731//                                      }
732//                              }
733//                      }
734//              }
735//      }
736
737//      void calcAffineCovariantDescriptors(final Ptr<DescriptorExtractor>& dextractor, final Mat& img,
738//                      vector<Elliptic_KeyPoint>& affRegions, Mat& descriptors)
739//      {
740//
741//              assert(!affRegions.empty());
742//              int size = dextractor->descriptorSize();
743//              int type = dextractor->descriptorType();
744//              descriptors = Mat(Size(size, affRegions.size()), type);
745//              descriptors.setTo(0);
746//
747//              int i = 0;
748//
749//              for (vector<Elliptic_KeyPoint>::iterator it = affRegions.begin(); it < affRegions.end(); ++it)
750//              {
751//                      Point p = it->centre;
752//
753//                      Mat_<float> size(2, 1);
754//                      size(0, 0) = size(1, 0) = it->size;
755//
756//                      //U matrix
757//                      Matrix transf = it->transf;
758//                      Mat_<float> U(2, 2);
759//                      U.setTo(0);
760//                      Matrix col0 = U.col(0);
761//                      transf.col(0).copyTo(col0);
762//                      Matrix col1 = U.col(1);
763//                      transf.col(1).copyTo(col1);
764//
765//                      float radius = it->size / 2;
766//                      float si = it->si;
767//
768//                      Size_<float> boundingBox;
769//
770//                      double ac_b2 = determinant(U);
771//                      boundingBox.width = ceil(U.get(1, 1)/ac_b2  * 3 * si );
772//                      boundingBox.height = ceil(U.get(0, 0)/ac_b2 * 3 * si );
773//
774//                      //Create window around interest point
775//                      float half_width = Math.min((float) Math.min(img.width - p.x-1, p.x), boundingBox.width);
776//                      float half_height = Math.min((float) Math.min(img.height - p.y-1, p.y), boundingBox.height);
777//                      float roix = max(p.x - (int) boundingBox.width, 0);
778//                      float roiy = max(p.y - (int) boundingBox.height, 0);
779//                      Rect roi = Rect(roix, roiy, p.x - roix + half_width+1, p.y - roiy + half_height+1);
780//
781//                      Matrix img_roi = img(roi);
782//
783//                      size(0, 0) = img_roi.width;
784//                      size(1, 0) = img_roi.height;
785//
786//                      size = U * size;
787//
788//                      Matrix transfImgRoi, transfImg;
789//                      warpAffine(img_roi, transfImgRoi, transf, Size(ceil(size(0, 0)), ceil(size(1, 0))),
790//                                      INTER_AREA, BORDER_DEFAULT);
791//
792//
793//                      Mat_<float> c(2, 1); //Transformed point
794//                      Mat_<float> pt(2, 1); //Image point
795//                      //Point within the Roi
796//                      pt(0, 0) = p.x - roix;
797//                      pt(1, 0) = p.y - roiy;
798//
799//                      //Point in U-Normalized coordinates
800//                      c = U * pt;
801//                      float cx = c(0, 0);
802//                      float cy = c(1, 0);
803//
804//
805//                      //Cut around point to have patch of 2*keypoint->size
806//
807//                      roix = Math.max(cx - radius, 0.f);
808//                      roiy = Math.max(cy - radius, 0.f);
809//
810//                      roi = Rect(roix, roiy, Math.min(cx - roix + radius, size(0, 0)),
811//                                      Math.min(cy - roiy + radius, size(1, 0)));
812//                      transfImg = transfImgRoi(roi);
813//
814//                      cx = c(0, 0) - roix;
815//                      cy = c(1, 0) - roiy;
816//
817//                      Matrix tmpDesc;
818//                      KeyPoint kp(Point(cx, cy), it->size);
819//
820//                      vector<KeyPoint> k(1, kp);
821//
822//                      transfImg.convertTo(transfImg, CV_8U);
823//                      dextractor->compute(transfImg, k, tmpDesc);
824//
825//                      for (int j = 0; j < tmpDesc.width; j++)
826//                      {
827//                              descriptors.get(i, j) = tmpDesc.get(0, j);
828//                      }
829//
830//                      i++;
831//
832//              }
833//
834//      }
835        
836        /**
837         * an example run
838         * @param args
839         * @throws IOException
840         */
841        public static void main(String[] args) throws IOException {
842                float sd = 5;
843                float si = 1.4f * sd;
844                HessianIPD ipd = new HessianIPD(sd, si);
845                FImage img = ImageUtilities.readF(AffineAdaption.class.getResourceAsStream("/org/openimaj/image/data/sinaface.jpg"));
846                
847//              img = img.multiply(255f);
848                
849//              ipd.findInterestPoints(img);
850//              List<InterestPointData> a = ipd.getInterestPoints(1F/(256*256));
851//              
852//              System.out.println("Found " + a.size() + " features");
853//              
854//              AffineAdaption adapt = new AffineAdaption();
855//              EllipticKeyPoint kpt = new EllipticKeyPoint();
856                MBFImage outImg = new MBFImage(img.clone(),img.clone(),img.clone());
857//              for (InterestPointData d : a) {
858//                      
859////                    InterestPointData d = new InterestPointData();
860////                    d.x = 102;
861////                    d.y = 396;
862//                      logger.info("Keypoint at: " + d.x + ", " + d.y);
863//                      kpt.si = si;
864//                      kpt.centre = new Pixel(d.x, d.y);
865//                      kpt.size = 2 * 3 * kpt.si;
866//                      
867//                      boolean converge = adapt.calcAffineAdaptation(img, kpt);
868//                      if(converge)
869//                      {
870//                              outImg.drawShape(new Ellipse(kpt.centre.x,kpt.centre.y,kpt.axes.getX(),kpt.axes.getY(),kpt.phi), RGBColour.BLUE);
871//                              outImg.drawPoint(kpt.centre, RGBColour.RED,3);
872//                      }
873//                      
874//                      
875//                      
876//                      logger.info("... converged: "+ converge);
877//              }
878                AffineAdaption adapt = new AffineAdaption(ipd,new IPDSelectionMode.Count(100));
879                adapt.findInterestPoints(img);
880                InterestPointVisualiser<Float[],MBFImage> ipv = InterestPointVisualiser.visualiseInterestPoints(outImg, adapt.points);
881                DisplayUtilities.display(ipv .drawPatches(RGBColour.BLUE, RGBColour.RED));
882                
883        }
884
885        /**
886         * @param b whether the differencial scaling should be done iteratively (slow) or not (fast)
887         */
888        public void setFastDifferentiationScale(boolean b) {
889                this.fastDifferentiationScale = b;
890        }
891}
892