import numpy
import numpy.random
import pylab

from dispims_color import dispims_color
import logreg
import hebbian_kmeans

rng = numpy.random.RandomState(1)

SMALL = 0.001
patchsize = 10
numfeatures = 100
pooling = "quadrants"


def crop_patches_color(image, keypoints, patchsize):
    """Cut square color patches of side patchsize, centered on the given keypoints."""
    patches = numpy.zeros((len(keypoints), 3*patchsize**2))
    for i, k in enumerate(keypoints):
        patches[i, :] = image[k[0]-patchsize/2:k[0]+patchsize/2,
                              k[1]-patchsize/2:k[1]+patchsize/2, :].flatten()
    return patches


def pca(data, var_fraction):
    """Principal components analysis of data (one observation per row),
    retaining as many components as required to retain var_fraction of the variance.

    Returns the PCA whitening mapping, its inverse, and the corresponding
    ZCA mappings in pixel space.
    """
    from numpy.linalg import eigh
    u, v = eigh(numpy.cov(data, rowvar=0, bias=1))
    v = v[:, numpy.argsort(u)[::-1]]
    u.sort()
    u = u[::-1]
    u = u[u.cumsum() <= (u.sum()*var_fraction)]
    numprincomps = u.shape[0]
    u[u < SMALL] = SMALL
    # NOTE: the mapping definitions below are reconstructed (the original lines
    # were garbled). pca_backward projects onto the leading principal components
    # and rescales by 1/sqrt(eigenvalue) (whitening); pca_forward inverts that
    # projection; the zca mappings additionally rotate back into pixel space.
    pca_backward = (u**-0.5)[:, None] * v[:, :numprincomps].T
    pca_forward = v[:, :numprincomps] * (u**0.5)[None, :]
    zca_backward = numpy.dot(v[:, :numprincomps], pca_backward)
    zca_forward = numpy.dot(pca_forward, v[:, :numprincomps].T)
    return pca_backward, pca_forward, zca_backward, zca_forward


def extract_features(data, prototypes):
    """Soft k-means feature extraction.

    NOTE: reconstructed (the original body was lost) as the standard 'triangle'
    activation of Coates et al. (2011): each feature responds by how much closer
    the patch is to that prototype than the mean distance over all prototypes.
    """
    # squared Euclidean distances between all patches and all prototypes
    Z = ((data**2).sum(1)[:, None] + (prototypes**2).sum(1)[None, :]
         - 2*numpy.dot(data, prototypes.T))
    mu = Z.mean(1)
    return numpy.maximum(mu[:, None] - Z, 0)


#LOAD DATA
trainimages = numpy.loadtxt("cifarmini_color_images_train.txt")
trainlabels = numpy.loadtxt("cifarmini_labels_train.txt")
testimages = numpy.loadtxt("cifarmini_color_images_test.txt")
testlabels = numpy.loadtxt("cifarmini_labels_test.txt")
numtest = testimages.shape[0]

#shuffle the training cases
R = rng.permutation(trainimages.shape[0])
trainimages = trainimages[R]
trainlabels = trainlabels[R]
allimages = numpy.concatenate((trainimages, testimages), 0)

#CROP PATCHES
print "generating patches"
#10 random patches per training image, at positions that keep the crop inside the image
somepatches = numpy.concatenate(
    [crop_patches_color(im.reshape(3, 32, 32).transpose(1, 2, 0),
                        numpy.array([numpy.random.randint(patchsize/2, 32-patchsize/2, 10),
                                     numpy.random.randint(patchsize/2, 32-patchsize/2, 10)]).T,
                        patchsize)
     for im in trainimages])
R = rng.permutation(somepatches.shape[0])
somepatches = somepatches[R, :]
print "numpatches: ", somepatches.shape[0]
print "done"

#LEARN WHITENING MATRICES
print "whitening"
#per-patch contrast normalization, then per-dimension standardization
meanstd = somepatches.std()
somepatches -= somepatches.mean(1)[:, None]
somepatches /= somepatches.std(1)[:, None] + 0.1 * meanstd
somepatches_mean = somepatches.mean(0)[None, :]
somepatches_std = somepatches.std(0)[None, :]
somepatches -= somepatches_mean
somepatches /= somepatches_std + 0.1 * meanstd
pca_backward, pca_forward, zca_backward, zca_forward = pca(somepatches, 0.9)
print "done"

pylab.figure(1)
dispims_color(somepatches[:100].reshape(100, patchsize, patchsize, 3), 2)
pylab.figure(2)
dispims_color(numpy.dot(numpy.dot(somepatches[:100], pca_backward.T), pca_forward.T).reshape(100, patchsize, patchsize, 3), 2)
#prototypes_whitened = numpy.dot(prototypes, pca_backward.T)
pylab.figure(3)
dispims_color(somepatches[:100].reshape(100, patchsize, patchsize, 3), 2)
pylab.figure(4)
dispims_color(pca_backward.reshape(pca_backward.shape[0], patchsize, patchsize, 3), 2)
#pylab.figure(3)
#dispims_color(zca_backward.T.reshape(zca_backward.shape[0], patchsize, patchsize, 3), 2)

#LEARN FEATURES
allfeatures = []
#prototypes = somepatches[:numfeatures]
#prototypes_whitened = numpy.dot(prototypes, pca_backward.T)
#k-means on whitened patches, initialized with the first numfeatures patches
prototypes_whitened = hebbian_kmeans.kmeans(numpy.dot(somepatches[:numfeatures], pca_backward.T),
                                            numpy.dot(somepatches, pca_backward.T),
                                            learningrate=0.01, numepochs=200)
pylab.figure(5)
dispims_color(numpy.dot(prototypes_whitened, pca_forward.T).reshape(prototypes_whitened.shape[0], patchsize, patchsize, 3), 1)
pylab.figure(6)
dispims_color(numpy.dot(prototypes_whitened, pca_backward).reshape(prototypes_whitened.shape[0], patchsize, patchsize, 3), 1)

#EXTRACT FEATURES
print "extracting features",
print "xxxxx",
for i, image in enumerate(allimages):
    print "\b\b\b\b\b\b{0:5d}".format(i),
    image = image.reshape(3, 32, 32).transpose(1, 2, 0)
    #dense grid of keypoints covering all valid patch centers
    keypoints = numpy.array([c.flatten() for c in
                             numpy.meshgrid(numpy.arange(patchsize/2, 32-patchsize/2),
                                            numpy.arange(patchsize/2, 32-patchsize/2))]).T
    patches = crop_patches_color(image, keypoints, patchsize)
    #apply the same normalization that was fit on the training patches
    patches -= patches.mean(1)[:, None]
    patches /= patches.std(1)[:, None] + 0.1 * meanstd
    patches -= somepatches_mean
    patches /= somepatches_std + 0.1 * meanstd
    patches = numpy.dot(patches, pca_backward.T)
    if pooling == "everywhere":
        #average feature activations over the whole image
        allfeatures.append(extract_features(patches, prototypes_whitened).mean(0))
    elif pooling == "quadrants":
        #pool feature activations separately within each image quadrant
        quadrants = numpy.array([int(str(int(a[0] >= 16)) + str(int(a[1] >= 16)), 2) for a in keypoints])
        features = extract_features(patches, prototypes_whitened)
        allfeatures.append(numpy.array([(features * (quadrants == j)[:, None]).mean(0)
                                        for j in range(4)]).reshape(4*numfeatures))
print

#SPLIT INTO TRAIN/VALIDATION/TEST SETS
numtrain = 1800
numvali = 200
alltrainfeatures = numpy.vstack(allfeatures[:numtrain+numvali])
testfeatures = numpy.vstack(allfeatures[numtrain+numvali:])
trainfeatures = alltrainfeatures[:numtrain]
valifeatures = alltrainfeatures[numtrain:]
alltrainlabels = trainlabels
valilabels = trainlabels[numtrain:]
trainlabels = trainlabels[:numtrain]

#CLASSIFICATION WITH CROSSVALIDATION
#try each weightcost and keep the one with the lowest validation error
#(note that the model is warm-started from the previous fit in each round)
weightcosts = [0.1, 0.01, 0.001, 0.0001, 0.00001, 0.0]
valicosts = []
lr = logreg.Logreg(10, trainfeatures.shape[1])
lr.train(trainfeatures.T, trainlabels.T, numsteps=100, verbose=False, weightcost=weightcosts[0])
#lr.train_cg(trainfeatures.T, trainlabels.T, weightcost=weightcosts[0], maxnumlinesearch=1000)
valicosts.append(lr.zeroone(valifeatures.T, valilabels.T))
for wcost in weightcosts[1:]:
    lr.train(trainfeatures.T, trainlabels.T, numsteps=100, verbose=False, weightcost=wcost)
    #lr.train_cg(trainfeatures.T, trainlabels.T, weightcost=wcost, maxnumlinesearch=1000)
    valicosts.append(lr.zeroone(valifeatures.T, valilabels.T))

#retrain on train+validation data with the winning weightcost
winningwcost = weightcosts[numpy.argmin(valicosts)]
lr.train(alltrainfeatures.T, alltrainlabels.T, numsteps=100, verbose=False, weightcost=winningwcost)
#lr.train_cg(alltrainfeatures.T, alltrainlabels.T, weightcost=winningwcost, maxnumlinesearch=1000)

print "winning weightcost: ", winningwcost
print "logreg train performance: ", 1.0 - lr.zeroone(trainfeatures.T, trainlabels.T)
print "logreg test performance: ", 1.0 - lr.zeroone(testfeatures.T, testlabels.T)
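
#NOTE: this script expects cifarmini_color_images_{train,test}.txt and
#cifarmini_labels_{train,test}.txt in the working directory, along with the
#companion modules dispims_color.py, logreg.py and hebbian_kmeans.py. It is
#Python 2 code: the print statements and the integer division in patchsize/2
#both rely on Python 2 semantics.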