import numpy as np


def softmax_loss_naive(W, X, y, reg):
    """
    Softmax loss function, naive implementation (with loops).

    Inputs have dimension D, there are C classes, and we operate on minibatches
    of N examples.

    Inputs:
    - W: A numpy array of shape (D, C) containing weights.
    - X: A numpy array of shape (N, D) containing a minibatch of data.
    - y: A numpy array of shape (N,) containing training labels; y[i] = c means
      that X[i] has label c, where 0 <= c < C.
    - reg: (float) regularization strength

    Returns a tuple of:
    - loss as single float
    - gradient with respect to weights W; an array of same shape as W
    """
    # Initialize the loss and gradient to zero.
    loss = 0.0
    dW = np.zeros_like(W)
    #############################################################################
    # TODO: Compute the softmax loss and its gradient using explicit loops.     #
    # Store the loss in loss and the gradient in dW. If you are not careful     #
    # here, it is easy to run into numeric instability. Don't forget the        #
    # regularization!                                                           #
    #############################################################################
    (N, D) = X.shape
    C = W.shape[1]
    # Loop over every example in the minibatch.
    for i in range(N):
        f_i = X[i].dot(W)
        # Shift the scores by their max so the exponentials stay numerically stable.
        f_i -= np.max(f_i)
        sum_j = np.sum(np.exp(f_i))
        # Probability assigned to each class for this example.
        p = lambda k: np.exp(f_i[k]) / sum_j
        loss += -np.log(p(y[i]))
        # Accumulate the gradient column by column via the softmax derivative.
        for k in range(C):
            p_k = p(k)
            dW[:, k] += (p_k - (k == y[i])) * X[i]
    loss /= N
    loss += 0.5 * reg * np.sum(W * W)
    dW /= N
    dW += reg * W
    #############################################################################
    #                            END OF YOUR CODE                               #
    #############################################################################

    return loss, dW
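For reference, the quantities that the inner loop accumulates are the standard softmax loss and its per-column gradient (a textbook derivation, not spelled out in the original post):

$$p_k = \frac{e^{f_k}}{\sum_j e^{f_j}}, \qquad L_i = -\log p_{y_i}, \qquad \frac{\partial L_i}{\partial W_{:,k}} = \left(p_k - \mathbf{1}[k = y_i]\right)\, x_i$$

Subtracting $\max_j f_j$ from the scores leaves every $p_k$ unchanged but prevents the exponentials from overflowing.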
def softmax_loss_vectorized(W, X, y, reg):
    """
    Softmax loss function, vectorized version.

    Inputs and outputs are the same as softmax_loss_naive.
    """
    # Initialize the loss and gradient to zero.
    loss = 0.0
    dW = np.zeros_like(W)
    #############################################################################
    # TODO: Compute the softmax loss and its gradient using no explicit loops.  #
    # Store the loss in loss and the gradient in dW. If you are not careful     #
    # here, it is easy to run into numeric instability. Don't forget the        #
    # regularization!                                                           #
    #############################################################################
    (N, D) = X.shape
    C = W.shape[1]
    f = X.dot(W)
    # Shift each row of scores by its max for numerical stability.
    f -= np.max(f, axis=1, keepdims=True)
    # Softmax probabilities for every class of every example.
    p = np.exp(f) / np.sum(np.exp(f), axis=1, keepdims=True)
    # y_label is an (N, C) one-hot matrix: each row has a 1 at the correct class
    # and 0 everywhere else.
    y_label = np.zeros((N, C))
    y_label[np.arange(N), y] = 1
    # Cross-entropy loss.
    loss = -1 * np.sum(np.multiply(np.log(p), y_label)) / N
    loss += 0.5 * reg * np.sum(W * W)
    # Gradient follows directly from the softmax derivative.
    dW = X.T.dot(p - y_label)
    dW /= N
    dW += reg * W
    #############################################################################
    #                            END OF YOUR CODE                               #
    #############################################################################
    return loss, dW
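In matrix form, with $P$ the $N \times C$ matrix of softmax probabilities and $Y$ the one-hot label matrix built above, the same loss and gradient read:

$$L = -\frac{1}{N}\sum_{i} \log P_{i, y_i} + \frac{\lambda}{2}\sum_{d,k} W_{dk}^2, \qquad \nabla_W L = \frac{1}{N}\, X^\top (P - Y) + \lambda W$$

which is exactly what `dW = X.T.dot(p - y_label)` computes before the averaging and regularization terms are added.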
Let's compare the runtime of the vectorized and non-vectorized versions:
naive loss: 2.357905e+00 computed in 0.091724s
vectorized loss: 2.357905e+00 computed in 0.002995s
Loss difference: 0.000000
Gradient difference: 0.000000
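This printout can be reproduced with a small timing harness along the following lines; the variable names W, X_dev, y_dev and the regularization value 0.000005 are assumptions borrowed from the standard assignment setup, not shown in the original post.

import time

# Time the naive (loop-based) implementation.
tic = time.time()
loss_naive, grad_naive = softmax_loss_naive(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('naive loss: %e computed in %fs' % (loss_naive, toc - tic))

# Time the vectorized implementation on the same data.
tic = time.time()
loss_vectorized, grad_vectorized = softmax_loss_vectorized(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('vectorized loss: %e computed in %fs' % (loss_vectorized, toc - tic))

# Both versions should agree up to numerical precision.
print('Loss difference: %f' % np.abs(loss_naive - loss_vectorized))
print('Gradient difference: %f' % np.linalg.norm(grad_naive - grad_vectorized, ord='fro'))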
# rates and regularization strengths; if you are careful you should be able to
# get a classification accuracy of over 0.35 on the validation set.
from cs231n.classifiers import Softmax

results = {}
best_val = -1
best_softmax = None
learning_rates = [1e-7, 5e-7]
regularization_strengths = [2.5e4, 5e4]
################################################################################
# TODO:                                                                        #
# Use the validation set to set the learning rate and regularization strength.#
# This should be identical to the validation that you did for the SVM; save   #
# the best trained softmax classifier in best_softmax.                        #
################################################################################
for lr in learning_rates:
    for reg in regularization_strengths:
        softmax = Softmax()
        loss_hist = softmax.train(X_train, y_train, learning_rate=lr, reg=reg,
                                  num_iters=1500, verbose=True)
        y_train_pred = softmax.predict(X_train)
        y_val_pred = softmax.predict(X_val)
        y_train_acc = np.mean(y_train_pred == y_train)
        y_val_acc = np.mean(y_val_pred == y_val)
        results[(lr, reg)] = [y_train_acc, y_val_acc]
        # Keep the classifier with the best validation accuracy seen so far.
        if y_val_acc > best_val:
            best_val = y_val_acc
            best_softmax = softmax
################################################################################
#                              END OF YOUR CODE                               #
################################################################################

# Print out results.
for lr, reg in sorted(results):
    train_accuracy, val_accuracy = results[(lr, reg)]
    print('lr %e reg %e train accuracy: %f val accuracy: %f' % (
        lr, reg, train_accuracy, val_accuracy))

print('best validation accuracy achieved during cross-validation: %f' % best_val)
lr 1.000000e-07 reg 2.500000e+04 train accuracy: 0.350592 val accuracy: 0.354000
lr 1.000000e-07 reg 5.000000e+04 train accuracy: 0.329551 val accuracy: 0.342000
lr 5.000000e-07 reg 2.500000e+04 train accuracy: 0.347286 val accuracy: 0.359000
lr 5.000000e-07 reg 5.000000e+04 train accuracy: 0.328551 val accuracy: 0.337000
best validation accuracy achieved during cross-validation: 0.359000
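A natural follow-up, not shown above, is to evaluate the best model found by the search on the held-out test set; X_test and y_test are assumed to be prepared the same way as the training and validation splits.

# Hypothetical follow-up: evaluate the best softmax classifier on the test set.
y_test_pred = best_softmax.predict(X_test)
test_accuracy = np.mean(y_test_pred == y_test)
print('softmax on raw pixels final test set accuracy: %f' % test_accuracy)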