Train data shape: (49000, 32, 32, 3)
Train labels shape: (49000,)
Validation data shape: (1000, 32, 32, 3)
Validation labels shape: (1000,)
Test data shape: (1000, 32, 32, 3)
Test labels shape: (1000,)
Next, each 32x32x3 image is stretched out into a single row vector, giving:
Training data shape: (49000, 3072)
Validation data shape: (1000, 3072)
Test data shape: (1000, 3072)
dev data shape: (500, 3072)
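The flattening itself is a single reshape per split. A minimal sketch, assuming X_train, X_val, X_test, and X_dev hold the raw (N, 32, 32, 3) arrays loaded above (X_dev being a small subset sampled from the training data):

import numpy as np

# Reshape each (32, 32, 3) image into a single 3072-dimensional row vector.
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_val = np.reshape(X_val, (X_val.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
X_dev = np.reshape(X_dev, (X_dev.shape[0], -1))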
A simple preprocessing step follows: subtract the mean image computed over the training data.
# Preprocessing: subtract the mean image
# first: compute the image mean based on the training data
mean_image = np.mean(X_train, axis=0)
print(mean_image[:10])  # print a few of the elements
plt.figure(figsize=(4, 4))
plt.imshow(mean_image.reshape((32, 32, 3)).astype('uint8'))  # visualize the mean image
plt.show()
# second: subtract the mean image from train and test data
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
X_dev -= mean_image
# third: append the bias dimension of ones (i.e. bias trick) so that our SVM
# only has to worry about optimizing a single weight matrix W.
X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))])
X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))])
X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))])
X_dev = np.hstack([X_dev, np.ones((X_dev.shape[0], 1))])
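The naive implementation below computes the multiclass SVM (hinge) loss with explicit loops. For reference, these are the standard per-sample loss and gradient expressions, which I am assuming are what the in-code comments call "formula 1" and "formula 2" (scores s = x_i W, margin delta = 1):

L_i = \sum_{j \neq y_i} \max\left(0,\; s_j - s_{y_i} + 1\right)

\nabla_{w_j} L_i = \mathbb{1}\left(s_j - s_{y_i} + 1 > 0\right) x_i \qquad (j \neq y_i,\ \text{formula 1})

\nabla_{w_{y_i}} L_i = -\Big(\sum_{j \neq y_i} \mathbb{1}\left(s_j - s_{y_i} + 1 > 0\right)\Big)\, x_i \qquad (\text{formula 2})

The full loss averages L_i over the minibatch and adds the regularization term reg * sum(W^2). (Strictly, the gradient of that term is 2 * reg * W; the code below follows the common assignment convention of adding reg * W.)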
def svm_loss_naive(W, X, y, reg):
    """
    Structured SVM loss function, naive implementation (with loops).

    Inputs have dimension D, there are C classes, and we operate on minibatches
    of N examples.

    Inputs:
    - W: A numpy array of shape (D, C) containing weights.
    - X: A numpy array of shape (N, D) containing a minibatch of data.
    - y: A numpy array of shape (N,) containing training labels; y[i] = c means
      that X[i] has label c, where 0 <= c < C.
    - reg: (float) regularization strength

    Returns a tuple of:
    - loss as single float
    - gradient with respect to weights W; an array of same shape as W
    """
    dW = np.zeros(W.shape)  # initialize the gradient as zero

    # compute the loss and the gradient
    num_classes = W.shape[1]
    num_train = X.shape[0]
    loss = 0.0
    # compute the loss of each sample, one at a time
    for i in range(num_train):
        # class scores for this sample
        scores = X[i].dot(W)
        correct_class_score = scores[y[i]]
        # accumulate each class's margin into the loss
        for j in range(num_classes):
            # by the formula, j == y[i] is the correct class itself, so skip it
            if j == y[i]:
                continue
            margin = scores[j] - correct_class_score + 1  # note delta = 1
            # only margins > 0 contribute to the loss
            if margin > 0:
                loss += margin
                # formula 2: gradient w.r.t. the correct-class column of W
                dW[:, y[i]] += -X[i, :].T
                # formula 1: gradient w.r.t. an incorrect-class column of W
                dW[:, j] += X[i, :].T

    # Right now the loss is a sum over all training examples, but we want it
    # to be an average instead so we divide by num_train.
    loss /= num_train
    dW /= num_train

    # Add regularization to the loss.
    loss += reg * np.sum(W * W)
    dW += reg * W

    #############################################################################
    # TODO:                                                                     #
    # Compute the gradient of the loss function and store it dW.               #
    # Rather that first computing the loss and then computing the derivative,  #
    # it may be simpler to compute the derivative at the same time that the    #
    # loss is being computed. As a result you may need to modify some of the   #
    # code above to compute the gradient.                                      #
    #############################################################################

    return loss, dW
Once this is written, verify it with a gradient check:
# Once you've implemented the gradient, recompute it with the code below
# and gradient check it with the function we provided for you

# Compute the loss and its gradient at W.
loss, grad = svm_loss_naive(W, X_dev, y_dev, 0.0)

# Numerically compute the gradient along several randomly chosen dimensions, and
# compare them with your analytically computed gradient. The numbers should match
# almost exactly along all dimensions.
from cs231n.gradient_check import grad_check_sparse
f = lambda w: svm_loss_naive(w, X_dev, y_dev, 0.0)[0]
grad_numerical = grad_check_sparse(f, W, grad)

# do the gradient check once again with regularization turned on
# you didn't forget the regularization gradient did you?
loss, grad = svm_loss_naive(W, X_dev, y_dev, 5e1)
f = lambda w: svm_loss_naive(w, X_dev, y_dev, 5e1)[0]
grad_numerical = grad_check_sparse(f, W, grad)
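grad_check_sparse is provided by the cs231n starter code; conceptually it samples a few random coordinates of W and compares the analytic gradient against a centered finite difference. A minimal sketch of that idea, assuming W, X_dev, y_dev and svm_loss_naive from above (the helper name numeric_grad_at and the step size h are my own choices, not the course API):

import numpy as np

def numeric_grad_at(f, W, ix, h=1e-5):
    """Centered finite-difference estimate of dL/dW at a single index ix."""
    old_val = W[ix]
    W[ix] = old_val + h
    fxph = f(W)          # loss with W[ix] nudged up
    W[ix] = old_val - h
    fxmh = f(W)          # loss with W[ix] nudged down
    W[ix] = old_val      # restore the original value
    return (fxph - fxmh) / (2 * h)

# Compare against the analytic gradient at a few random coordinates.
loss, grad = svm_loss_naive(W, X_dev, y_dev, 0.0)
f = lambda w: svm_loss_naive(w, X_dev, y_dev, 0.0)[0]
for _ in range(5):
    ix = tuple(np.random.randint(d) for d in W.shape)
    num = numeric_grad_at(f, W, ix)
    ana = grad[ix]
    rel_error = abs(num - ana) / max(1e-12, abs(num) + abs(ana))
    print('numerical: %f analytic: %f, relative error: %e' % (num, ana, rel_error))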
def svm_loss_vectorized(W, X, y, reg):
    """
    Structured SVM loss function, vectorized implementation.

    Inputs and outputs are the same as svm_loss_naive.
    """
    loss = 0.0
    dW = np.zeros(W.shape)  # initialize the gradient as zero

    #############################################################################
    # TODO:                                                                     #
    # Implement a vectorized version of the structured SVM loss, storing the   #
    # result in loss.                                                          #
    #############################################################################
    # scores has shape (N, C)
    scores = X.dot(W)
    num_train = X.shape[0]
    # use np.arange() to pick out each sample's correct-class score, then
    # reshape it to (num_train, 1) so it broadcasts against scores
    correct_class_score = scores[np.arange(num_train), y]
    correct_class_score = np.reshape(correct_class_score, (num_train, -1))
    margins = scores - correct_class_score + 1
    margins = np.maximum(0, margins)
    # the line above also produced entries for j == y[i], so zero them out
    margins[np.arange(num_train), y] = 0
    loss += np.sum(margins) / num_train
    loss += reg * np.sum(W * W)
    #############################################################################
    #                             END OF YOUR CODE                             #
    #############################################################################

    #############################################################################
    # TODO:                                                                     #
    # Implement a vectorized version of the gradient for the structured SVM    #
    # loss, storing the result in dW.                                          #
    #                                                                          #
    # Hint: Instead of computing the gradient from scratch, it may be easier   #
    # to reuse some of the intermediate values that you used to compute the    #
    # loss.                                                                    #
    #############################################################################
    # turn margins into an indicator matrix: 1 wherever the margin was positive
    margins[margins > 0] = 1
    # the j == y[i] entry must be counted once (with a negative sign) for every
    # positive margin in its row
    row_sum = np.sum(margins, axis=1)
    margins[np.arange(num_train), y] = -row_sum.T
    # formulas 1 and 2 are computed together in a single matrix product
    dW = np.dot(X.T, margins)
    dW /= num_train
    dW += reg * W
    #############################################################################
    #                             END OF YOUR CODE                             #
    #############################################################################

    return loss, dW
Now compare the runtime of the two implementations:
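The numbers below come from timing both functions on the dev split. A minimal sketch of that comparison, assuming the preprocessed X_dev/y_dev from above (the random W initialization, the tiny regularization value, and the Frobenius-norm comparison of the gradients follow the usual assignment setup, but are assumptions here):

import time
import numpy as np

# generate a random SVM weight matrix of small numbers (3073 = 3072 pixels + bias)
W = np.random.randn(3073, 10) * 0.0001

tic = time.time()
loss_naive, grad_naive = svm_loss_naive(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('Naive loss: %e computed in %fs' % (loss_naive, toc - tic))

tic = time.time()
loss_vectorized, grad_vectorized = svm_loss_vectorized(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('Vectorized loss: %e computed in %fs' % (loss_vectorized, toc - tic))

# the losses should match; the gradients are compared via the Frobenius norm
print('difference: %f' % (loss_naive - loss_vectorized))
print('difference: %f' % np.linalg.norm(grad_naive - grad_vectorized, ord='fro'))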
Naive loss: 8.577034e+00 computed in 0.084761s
Vectorized loss: 8.577034e+00 computed in 0.001029s
difference: -0.000000
Naive loss and gradient: computed in 0.082744s
Vectorized loss and gradient: computed in 0.002027s
difference: 0.000000
def train(self, X, y, learning_rate=1e-3, reg=1e-5, num_iters=100,
          batch_size=200, verbose=False):
    """
    Train this linear classifier using stochastic gradient descent.

    Inputs:
    - X: A numpy array of shape (N, D) containing training data; there are N
      training samples each of dimension D.
    - y: A numpy array of shape (N,) containing training labels; y[i] = c
      means that X[i] has label 0 <= c < C for C classes.
    - learning_rate: (float) learning rate for optimization.
    - reg: (float) regularization strength.
    - num_iters: (integer) number of steps to take when optimizing
    - batch_size: (integer) number of training examples to use at each step.
    - verbose: (boolean) If true, print progress during optimization.

    Outputs:
    A list containing the value of the loss function at each training iteration.
    """
    num_train, dim = X.shape
    num_classes = np.max(y) + 1  # assume y takes values 0...K-1 where K is number of classes
    if self.W is None:
        # lazily initialize W
        self.W = 0.001 * np.random.randn(dim, num_classes)

    # Run stochastic gradient descent to optimize W
    loss_history = []
    for it in range(num_iters):
        X_batch = None
        y_batch = None

        #########################################################################
        # TODO:                                                                 #
        # Sample batch_size elements from the training data and their          #
        # corresponding labels to use in this round of gradient descent.       #
        # Store the data in X_batch and their corresponding labels in          #
        # y_batch; after sampling X_batch should have shape (dim, batch_size)  #
        # and y_batch should have shape (batch_size,)                          #
        #                                                                      #
        # Hint: Use np.random.choice to generate indices. Sampling with        #
        # replacement is faster than sampling without replacement.             #
        #########################################################################
        batch_inx = np.random.choice(num_train, batch_size)
        X_batch = X[batch_inx, :]
        y_batch = y[batch_inx]
        #########################################################################
        #                         END OF YOUR CODE                             #
        #########################################################################

        # evaluate loss and gradient
        loss, grad = self.loss(X_batch, y_batch, reg)
        loss_history.append(loss)

        # perform parameter update
        #########################################################################
        # TODO:                                                                 #
        # Update the weights using the gradient and the learning rate.         #
        #########################################################################
        self.W = self.W - learning_rate * grad
        #########################################################################
        #                         END OF YOUR CODE                             #
        #########################################################################

        if verbose and it % 100 == 0:
            print('iteration %d / %d: loss %f' % (it, num_iters, loss))

    return loss_history
def predict(self, X):
    """
    Use the trained weights of this linear classifier to predict labels for
    data points.

    Inputs:
    - X: A numpy array of shape (N, D) containing training data; there are N
      training samples each of dimension D.

    Returns:
    - y_pred: Predicted labels for the data in X. y_pred is a 1-dimensional
      array of length N, and each element is an integer giving the predicted
      class.
    """
    y_pred = np.zeros(X.shape[0])
    ###########################################################################
    # TODO:                                                                   #
    # Implement this method. Store the predicted labels in y_pred.            #
    ###########################################################################
    score = X.dot(self.W)
    y_pred = np.argmax(score, axis=1)
    ###########################################################################
    #                           END OF YOUR CODE                              #
    ###########################################################################
    return y_pred
Now train the classifier and get the predictions:
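The accuracies below come from training a LinearSVM on the preprocessed training set and predicting on the train and validation splits. A minimal sketch of those calls (the learning rate and regularization strength shown here are illustrative, not necessarily the exact values behind the numbers below):

svm = LinearSVM()
loss_hist = svm.train(X_train, y_train, learning_rate=1e-7, reg=2.5e4,
                      num_iters=1500, verbose=True)

# evaluate the performance on both the training and validation set
y_train_pred = svm.predict(X_train)
print('training accuracy: %f' % (np.mean(y_train == y_train_pred), ))
y_val_pred = svm.predict(X_val)
print('validation accuracy: %f' % (np.mean(y_val == y_val_pred), ))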
training accuracy: 0.376633
validation accuracy: 0.384000
# Use the validation set to tune hyperparameters (regularization strength and
# learning rate). You should experiment with different ranges for the learning
# rates and regularization strengths; if you are careful you should be able to
# get a classification accuracy of about 0.4 on the validation set.
learning_rates = [1e-7, 3e-7, 5e-7, 9e-7]
regularization_strengths = [2.5e4, 1e4, 3e4, 2e4]

# results is dictionary mapping tuples of the form
# (learning_rate, regularization_strength) to tuples of the form
# (training_accuracy, validation_accuracy). The accuracy is simply the fraction
# of data points that are correctly classified.
results = {}
best_val = -1    # The highest validation accuracy that we have seen so far.
best_svm = None  # The LinearSVM object that achieved the highest validation rate.

################################################################################
# TODO:                                                                        #
# Write code that chooses the best hyperparameters by tuning on the validation #
# set. For each combination of hyperparameters, train a linear SVM on the      #
# training set, compute its accuracy on the training and validation sets, and  #
# store these numbers in the results dictionary. In addition, store the best   #
# validation accuracy in best_val and the LinearSVM object that achieves this  #
# accuracy in best_svm.                                                        #
#                                                                              #
# Hint: You should use a small value for num_iters as you develop your         #
# validation code so that the SVMs don't take much time to train; once you are #
# confident that your validation code works, you should rerun the validation   #
# code with a larger value for num_iters.                                      #
################################################################################
for learning_rate in learning_rates:
    for regularization_strength in regularization_strengths:
        svm = LinearSVM()
        loss_hist = svm.train(X_train, y_train, learning_rate=learning_rate,
                              reg=regularization_strength, num_iters=1500,
                              verbose=True)
        y_train_pred = svm.predict(X_train)
        y_val_pred = svm.predict(X_val)
        y_train_acc = np.mean(y_train_pred == y_train)
        y_val_acc = np.mean(y_val_pred == y_val)
        results[(learning_rate, regularization_strength)] = [y_train_acc, y_val_acc]
        if y_val_acc > best_val:
            best_val = y_val_acc
            best_svm = svm
################################################################################
#                              END OF YOUR CODE                                #
################################################################################

# Print out results.
for lr, reg in sorted(results):
    train_accuracy, val_accuracy = results[(lr, reg)]
    print('lr %e reg %e train accuracy: %f val accuracy: %f' % (
        lr, reg, train_accuracy, val_accuracy))

print('best validation accuracy achieved during cross-validation: %f' % best_val)