Deep Learning Hands-on Materials (20161220)


  • Hands-on Deep Learning

  • http://qiita.com/1000ch/items/93841f76ea52551b6a97

  • http://qiita.com/m3y/items/45c7be319e401b24fca8

  • https://github.com/pfnet/chainer

  • curl -L https://github.com/pfnet/chainer/archive/v1.19.0.tar.gz -o v1.19.0.tar.gz
    tar xzf v1.19.0.tar.gz
    cd chainer-1.19.0/examples/mnist

    python train_mnist.py -e 5 -u 200

    GPU: -1
    # unit: 200
    # Minibatch-size: 100
    # epoch: 5

    epoch  main/loss  validation/main/loss  main/accuracy  validation/main/accuracy  elapsed_time
    1      0.274201   0.129138              0.921567       0.9604                    4.90338
    2      0.106627   0.0927404             0.968          0.9691                    9.89871
    3      0.0719209  0.0824399             0.9775         0.9748                    14.9372
    4      0.0537356  0.0790009             0.983133       0.9758                    19.9937
    5      0.0394959  0.0797915             0.987283       0.9753                    25.0734
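
As a side note (not on the slides), the script downloads MNIST automatically on its first run through chainer.datasets.get_mnist(); a minimal sketch of what that call returns:

    import chainer

    # 60,000 training and 10,000 test examples; each image is a flattened
    # 784-dimensional float32 vector paired with an integer label 0-9.
    train, test = chainer.datasets.get_mnist()
    x, t = train[0]
    print(len(train), len(test))  # 60000 10000
    print(x.shape, x.dtype, t)    # (784,) float32 <label>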

  • import chainer
    import chainer.functions as F
    import chainer.links as L

    class MLP(chainer.Chain):

        def __init__(self, n_units, n_out):
            super(MLP, self).__init__(
                # the size of the inputs to each layer will be inferred
                l1=L.Linear(None, n_units),  # n_in -> n_units
                l2=L.Linear(None, n_units),  # n_units -> n_units
                l3=L.Linear(None, n_out),    # n_units -> n_out
            )

        def __call__(self, x):
            h1 = F.relu(self.l1(x))
            h2 = F.relu(self.l2(h1))
            return self.l3(h2)

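A quick sketch (not from the slides) of how this MLP is used: L.Linear(None, ...) leaves the input size unspecified, and Chainer infers it on the first forward pass.

    import numpy as np

    model = MLP(n_units=200, n_out=10)
    x = np.zeros((1, 784), dtype=np.float32)  # one flattened 28x28 image
    y = model(x)                              # first call fixes l1's input size to 784
    print(y.shape)                            # (1, 10): one score per digit class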

  • def main():
        parser = argparse.ArgumentParser(description='Chainer example: MNIST')
        parser.add_argument('--batchsize', '-b', type=int, default=100,
                            help='Number of images in each mini-batch')
        parser.add_argument('--epoch', '-e', type=int, default=20,
                            help='Number of sweeps over the dataset to train')
        parser.add_argument('--gpu', '-g', type=int, default=-1,
                            help='GPU ID (negative value indicates CPU)')
        parser.add_argument('--out', '-o', default='result',
                            help='Directory to output the result')
        parser.add_argument('--resume', '-r', default='',
                            help='Resume the training from snapshot')
        parser.add_argument('--unit', '-u', type=int, default=1000,
                            help='Number of units')
        args = parser.parse_args()

    python train_mnist.py -e 5 -u 200

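For reference, here is what the command above resolves to, given the defaults in the parser (a summary, not output from the slides):

    # python train_mnist.py -e 5 -u 200
    #   args.batchsize = 100   (default)
    #   args.epoch     = 5
    #   args.gpu       = -1    (CPU)
    #   args.out       = 'result'
    #   args.resume    = ''
    #   args.unit      = 200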

  • optimizer = chainer.optimizers.Adam()

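A sketch of how the optimizer is wired into training in the official MNIST example (variable names follow train_mnist.py; the slide may differ in details):

    from chainer import training

    model = L.Classifier(MLP(args.unit, 10))
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)          # bind the optimizer to the model's parameters

    train, test = chainer.datasets.get_mnist()
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)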

  • http://www.slideshare.net/nlab_utokyo/20150930-53741757

  • # Run the training
    trainer.run()

    # Save the trained model
    chainer.serializers.save_npz('linear.model', model)

    if __name__ == '__main__':
        main()

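The per-epoch table shown earlier is produced by trainer extensions; a sketch following the official example (test_iter is an iterator over the test set, an assumption consistent with train_mnist.py):

    from chainer.training import extensions

    test_iter = chainer.iterators.SerialIterator(test, args.batchsize,
                                                 repeat=False, shuffle=False)
    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))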

  • import argparse
    import numpy as np
    import chainer
    import chainer.links as L

    # Network definition
    class MLP(chainer.Chain):
        # ... same MLP definition as in train_mnist.py

    def main():
        parser = argparse.ArgumentParser(description='Chainer example: MNIST')
        parser.add_argument('--unit', '-u', type=int, default=1000,
                            help='Number of units')
        parser.add_argument('--number', '-n', type=int, default=1,
                            help='mnist index')
        args = parser.parse_args()

        train, test = chainer.datasets.get_mnist()
        index = min(args.number, 9999)
        targetNumber = test[index][0].reshape(-1, 784)
        targetAnswer = test[index][1]

        model = L.Classifier(MLP(args.unit, 10))
        chainer.serializers.load_npz('linear.model', model)

        # Results
        x = chainer.Variable(targetNumber)
        v = model.predictor(x)
        print("mnistIndex:", args.number, "answer:", targetAnswer,
              "predict:", np.argmax(v.data))

    if __name__ == '__main__':
        main()


  • python predict_mnist.py -u 200 -n 7777

    (valid MNIST test indices are 0-9999; larger values are clamped to 9999)

    mnistIndex: 7777 answer: 5 predict: 5

  • https://pixlr.com/editor/

  • import argparse
    import numpy as np
    from PIL import Image
    import chainer
    import chainer.links as L

    # Network definition
    class MLP(chainer.Chain):
        # ... same MLP definition as in train_mnist.py

    def main():
        parser = argparse.ArgumentParser(description='Chainer example: MNIST')
        parser.add_argument('--unit', '-u', type=int, default=1000,
                            help='Number of units')
        parser.add_argument('--name', '-n', type=str, default="1.png",
                            help='file name')
        args = parser.parse_args()

        # Load the image, convert to grayscale, invert, and scale to [0, 1]
        myNumber = Image.open(args.name).convert("L")
        myNumber = 1.0 - np.asarray(myNumber, dtype="float32") / 255
        myNumber = myNumber.reshape((1, 784))

        model = L.Classifier(MLP(args.unit, 10))
        chainer.serializers.load_npz('linear.model', model)

        # Results
        x = chainer.Variable(myNumber)
        v = model.predictor(x)
        print("fileName:", args.name, "predict:", np.argmax(v.data))

    if __name__ == '__main__':
        main()

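One assumption worth calling out: the script above expects the image file to already be 28x28 pixels, since it reshapes straight to 784 values. If the drawing exported from the editor is larger, a resize step like this sketch could be applied first:

    from PIL import Image
    import numpy as np

    img = Image.open("2.jpg").convert("L").resize((28, 28))
    myNumber = 1.0 - np.asarray(img, dtype="float32") / 255  # MNIST digits are white on black
    myNumber = myNumber.reshape((1, 784))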

  • python predict_handwritten.py -u 200 -n 2.jpg

    fileName: 2.jpg predict: 2

  • class CNN(chainer.Chain):

        def __init__(self, train=True):
            super(CNN, self).__init__(
                conv1=L.Convolution2D(1, 32, 5),
                conv2=L.Convolution2D(32, 64, 5),
                l1=L.Linear(1024, 10),
            )
            self.train = train

        def __call__(self, x):
            h = F.max_pooling_2d(F.relu(self.conv1(x)), 2)
            h = F.max_pooling_2d(F.relu(self.conv2(h)), 2)
            return self.l1(h)

    MLP → CNN (see the shape sketch below for why l1 takes 1024 inputs)
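
A sketch (not on the slides) of the 1024: each 5x5 convolution without padding shrinks the feature map by 4 pixels and each 2x2 pooling halves it, so 28 -> 24 -> 12 -> 8 -> 4, and 64 channels * 4 * 4 = 1024 features; L.Linear flattens the (1, 64, 4, 4) activation automatically.

    import numpy as np

    model = CNN()
    x = np.zeros((1, 1, 28, 28), dtype=np.float32)  # (batch, channel, height, width)
    print(model(x).shape)                           # (1, 10)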

  • # Set up a neural network to train
    model = L.Classifier(CNN())
    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        model.to_gpu()                           # Copy the model to the GPU

    # Setup an optimizer
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    # Load the MNIST dataset
    train, test = chainer.datasets.get_mnist(ndim=3)
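
A brief sketch (an assumption, following the pattern of the MLP slides) of why ndim=3 is passed and how the trained CNN would be saved for the predict script below:

    # ndim=3 makes each MNIST example a (1, 28, 28) array
    # (channel, height, width), which is what L.Convolution2D expects.
    train, test = chainer.datasets.get_mnist(ndim=3)
    print(train[0][0].shape)   # (1, 28, 28)

    # After training, save the model under the name the predict script loads
    # (file name assumed to match the load_npz call below):
    chainer.serializers.save_npz('cnn.model', model)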

  • # Network definition (imports are the same as in predict_handwritten.py above)
    class CNN(chainer.Chain):
        # ... same CNN definition as in Train_mnist_CNN.py

    def main():
        parser = argparse.ArgumentParser(description='Chainer example: MNIST')
        parser.add_argument('--unit', '-u', type=int, default=1000,
                            help='Number of units')
        parser.add_argument('--name', '-n', type=str, default="1.png",
                            help='file name')
        args = parser.parse_args()

        model = L.Classifier(CNN())

        myNumber = Image.open(args.name).convert("L")
        myNumber = 1.0 - np.asarray(myNumber, dtype="float32") / 255
        myNumber = myNumber.reshape((1, 1, 28, 28))

        chainer.serializers.load_npz('cnn.model', model)

        # Results
        x = chainer.Variable(myNumber)
        v = model.predictor(x)
        print(args.name, np.argmax(v.data))

    if __name__ == '__main__':
        main()


    python predict_handwritten.py -n 2.jpg
