Jupyter Notebookなどで、コードを実装して実際に確かめてみましょう。
keras、TensorFlowなどのインストールに関してはこのページを参照
Jetson Nanoを使う場合、コードにあるとおりにデータセットを使うとメモリー不足で落ちます(train_test_splitでデータ分割するところ)。SWAP領域を4GB程度は確保しておきましょう。
お急ぎの向きはJetsonではなくGoogleのColaboratoryを使うほうがいいです。
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 |
"""IMDB sentiment classification with Keras.

Loads the IMDB review dataset (top-10,000-word vocabulary), encodes each
review as a 10,000-dimensional multi-hot (binary bag-of-words) vector,
trains a small dense network, plots the learning curves, re-runs training
with EarlyStopping, and finally compares against a single-layer
(logistic-regression-shaped) model on the held-out test set.
"""
import numpy as np
import matplotlib.pyplot as plt
from keras.datasets import imdb
from keras.preprocessing.text import Tokenizer
from keras import models
from keras import layers
from keras.callbacks import EarlyStopping
from sklearn.model_selection import train_test_split

# --- Data preparation -------------------------------------------------------
# Each review arrives as a list of word indices; num_words keeps only the
# 10,000 most frequent words in the corpus.
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
train_data.shape        # notebook-only inspection; no effect in a script
train_data[0]           # notebook-only inspection
len(train_data[0])      # notebook-only inspection
train_labels[0]         # notebook-only inspection

# --- Preprocessing -----------------------------------------------------------
# Convert each variable-length index sequence into a fixed-size 10,000-dim
# binary vector (1 where the word occurs, 0 otherwise).
tokenizer = Tokenizer(num_words=10000)
x_train = tokenizer.sequences_to_matrix(train_data, mode='binary')
x_test = tokenizer.sequences_to_matrix(test_data, mode='binary')
x_train.shape           # notebook-only inspection

y_train = np.asarray(train_labels).astype('float32')
y_test = np.asarray(test_labels).astype('float32')

# Hold out 10% of the training data for validation.
x_train, x_val, y_train, y_val = train_test_split(
    x_train, y_train, test_size=0.1, random_state=0)
print(x_train.shape, x_val.shape)

# --- Model construction ------------------------------------------------------
model = models.Sequential()
model.add(layers.Dense(32, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.summary()
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])

# --- Training ----------------------------------------------------------------
history = model.fit(x=x_train,
                    y=y_train,
                    epochs=20,
                    batch_size=512,
                    verbose=1,
                    validation_data=(x_val, y_val))

# --- Visualizing the results -------------------------------------------------
history_dict = history.history
history_dict.keys()  # NOTE: the key is 'acc' on older Keras, 'accuracy' on newer ones
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
epochs = range(1, len(acc) + 1)
plt.figure(figsize=(12, 8))
plt.plot(epochs, acc, label='acc')
plt.plot(epochs, val_acc, label='val_acc')
plt.ylim((0, 1))
plt.legend(loc='best')
plt.show()

loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(loss) + 1)
plt.figure(figsize=(12, 8))
plt.plot(epochs, loss, label='loss')
plt.plot(epochs, val_loss, label='val_loss')
plt.ylim((0, 1))
plt.legend(loc='best')
plt.show()

# --- Let EarlyStopping pick a reasonable epoch count -------------------------
# Stop once val_accuracy has not improved for 3 consecutive epochs.
# NOTE(review): this re-fits the model trained above, so validation accuracy
# has usually already plateaued and EarlyStopping fires almost immediately.
# To actually discover a good epoch count, rebuild the model (as done for the
# logistic-regression variant below) before this fit — confirm the intent.
callbacks = [EarlyStopping(monitor='val_accuracy', patience=3)]
history = model.fit(x=x_train,
                    y=y_train,
                    epochs=20,
                    batch_size=512,
                    verbose=1,
                    callbacks=callbacks,
                    validation_data=(x_val, y_val))

score = model.evaluate(x_test, y_test)
# Fixed typo in the output message: "accuray" -> "accuracy".
print("Test set loss:{},Test set accuracy:{}".format(score[0], score[1]))

# --- Rebuild and rerun: a logistic-regression-shaped model -------------------
# A single sigmoid unit on the 10,000-dim input is equivalent in form to
# logistic regression.
model = models.Sequential()
model.add(layers.Dense(1, activation='sigmoid', input_shape=(10000,)))
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
history = model.fit(x=x_train,
                    y=y_train,
                    epochs=100,
                    batch_size=512,
                    verbose=1,
                    callbacks=callbacks,
                    validation_data=(x_val, y_val))
score = model.evaluate(x_test, y_test)
# Fixed typo in the output message: "accuray" -> "accuracy".
print("Test set loss:{},Test set accuracy:{}".format(score[0], score[1]))
Leave a Reply