以下のエラーが発生し、精度を算出することができません。
global xを入れる場所がわからず困っています。
#エラーメッセージ
UnboundLocalError Traceback (most recent call last)
<ipython-input-3-1b587794855b> in <module>()
----> 1 validate_homework()
2 # score_homework()
<ipython-input-2-ebacf1272ad7> in validate_homework()
26 test_y_mini = test_y[:100]
27
---> 28 pred_y = homework(train_X_mini, train_y_mini, test_X_mini, test_y_mini)
29 print(f1_score(test_y_mini, pred_y, average='macro'))
30
<ipython-input-1-1b079a8ccba7> in homework(train_X, train_y, test_X, test_y)
17 def softmax(x):
18 x -= np.max(x, axis=1).reshape((-1, 1))
---> 19 return np.exp(x) / np.sum(np.exp(x), axis=1).reshape((-1, 1))
20
21 def cross_entropy(y, output):
UnboundLocalError: local variable 'x' referenced before assignment
#コード
def homework(train_X, train_y, test_X, test_y):
epoch = 10000 batch_size = 20 learning_rate = 1e-3 input_size = 784 hidden_size = 100 output_size = 10 data_num = train_X.shape[0] np.random.seed(0) W1 = np.random.randn(input_size, hidden_size) b1 = np.zeros(hidden_size) W2 = np.random.randn(hidden_size, output_size) b2 = np.zeros(output_size) def softmax(x): x -= np.max(x, axis=1).reshape((-1, 1)) return np.exp(x) / np.sum(np.exp(x), axis=1).reshape((-1, 1)) def cross_entropy(y, output): batch_size = y.shape[0] return -np.sum(np.log(output[np.arange(batch_size), y])) / batch_size def sigmoid(x): return 1 / (1 + np.exp(-x)) def forward(x): fwd = {} fwd['h1'] = sigmoid(np.dot(x, W1) + b1) fwd['prob'] = softmax(np.dot(fwd['h1'], W2) + b2) return fwd for n in range(epoch): loss_sum = 0 for i in range(0, data_num, batch_size): x = train_X[i:i+batch_size] y = train_y[i:i+batch_size] fwd = forward(x) loss_sum += cross_entropy(y, fwd['prob']) grad = network.gradient(x, y) for key in ('W1', 'b1', 'W2', 'b2'): network.params[key] -= learning_rate * grad[key] loss = network.loss(x, y) train_loss_list.append(loss) if np.mod(n, 1000) == 0: pred_y = np.argmax(forward(test_X)['prob'], axis=1) accuracy = f1_score(test_y, pred_y, average='macro') print("epoch: %5d, loss_sum: %.5f, accuracy: %.5f" % (n, loss_sum, accuracy)) pred_y = np.argmax(forward(test_X)['prob'], axis=1) return pred_y
from sklearn.utils import shuffle
from sklearn.metrics import f1_score
from sklearn.datasets import fetch_mldata
from sklearn.model_selection import train_test_split
import numpy as np
def load_mnist():
mnist = fetch_mldata('MNIST original')
mnist_X, mnist_y = shuffle(mnist.data.astype('float32'),
mnist.target.astype('int32'), random_state=42)
mnist_X = mnist_X / 255.0 return train_test_split(mnist_X, mnist_y, test_size=0.2, random_state=42)
def validate_homework():
train_X, test_X, train_y, test_y = load_mnist()
train_X_mini = train_X[:100] train_y_mini = train_y[:100] test_X_mini = test_X[:100] test_y_mini = test_y[:100] pred_y = homework(train_X_mini, train_y_mini, test_X_mini, test_y_mini) print(f1_score(test_y_mini, pred_y, average='macro'))
def score_homework():
train_X, test_X, train_y, test_y = load_mnist()
pred_y = homework(train_X, train_y, test_X, test_y)
print(f1_score(test_y, pred_y, average='macro'))
validate_homework()
あなたの回答
tips
プレビュー