training.py
# How to run:
# 1. Run training.py once.
# 2. Call run_training() in the console to train the model.
# Note:
# it is suggested to restart your kernel before training the model again
# (in order to clear all the variables in memory);
# otherwise errors such as "conv1/weights already exists" may occur.
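#
# A possible alternative to restarting the kernel (a sketch, not from the
# original author): in TensorFlow 1.x, clearing the default graph at the top
# of run_training() should also avoid the "variable already exists" errors:
#
#     tf.reset_default_graph()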
#%%
import os
import numpy as np
import tensorflow as tf
import input_data   # companion module: builds the input batch queue from image files
import model        # companion module: defines the network, loss, train, and evaluation ops
#%%
N_CLASSES = 5
IMG_W = 208  # resize the images; if the input images are too large, training will be very slow
IMG_H = 208
BATCH_SIZE = 16
CAPACITY = 2000   # maximum number of elements held in the input queue
MAX_STEP = 50000  # with the current parameters, MAX_STEP > 10k is suggested
learning_rate = 0.0001  # with the current parameters, a learning rate < 0.0001 is suggested

# note: these two paths are re-defined inside run_training()
path = '/home/user/Desktop/flower-tensorflow/train/'
logs_train_dir = '/home/user/Desktop/flower-tensorflow/train_logits/'
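# Rough scale check (arithmetic added here, not in the original):
# MAX_STEP * BATCH_SIZE = 50000 * 16 = 800,000 images are drawn over a full
# run, so a small flower dataset is cycled through many times.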
#%%
def run_training():
    # you need to change these directories to yours
    path = '/home/user/Desktop/flower-tensorflow/train/'
    logs_train_dir = '/home/user/Desktop/flower-tensorflow/train_logits/'

    train, train_label = input_data.get_files(path)
    train_batch, train_label_batch = input_data.get_batch(train,
                                                          train_label,
                                                          IMG_W,
                                                          IMG_H,
                                                          BATCH_SIZE,
                                                          CAPACITY)

    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    print(train_logits)  # print the logits tensor so its shape can be checked
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)  # 'trainning' is the spelling used by the model module
    train_acc = model.evaluation(train_logits, train_label_batch)
    #summary_op = tf.summary.merge_all()
    sess = tf.Session()
    #train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)  # should point at the log dir, not the logits tensor
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    # launch the threads that fill the input queue built by input_data.get_batch
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])

            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%' % (step, tra_loss, tra_acc * 100.0))
            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
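
#%%
# A minimal usage sketch (an addition, not in the original script): with this
# guard the file can also be run directly, instead of importing it and calling
# run_training() from the console.
if __name__ == '__main__':
    run_training()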