Why does my TensorFlow neural network get so slow after I start training it?
My TensorFlow neural network gets progressively slower as it trains: every time I call train, it is slower than the time before.
After the 30th or so training iteration it becomes unbearably slow and is virtually unusable, and by the 60th or so iteration the program stops responding.
I didn't think this neural network was that complex; it's a simple three-layer network put together with TensorFlow.
Does anyone have an idea how to go about this problem?
import tensorflow as tf

hidden_1_layer = {'weights': tf.Variable(tf.random_normal([37500, 500])),
                  'biases': tf.Variable(tf.random_normal([500]))}
hidden_2_layer = {'weights': tf.Variable(tf.random_normal([500, 250])),
                  'biases': tf.Variable(tf.random_normal([250]))}
hidden_3_layer = {'weights': tf.Variable(tf.random_normal([250, 125])),
                  'biases': tf.Variable(tf.random_normal([125]))}
output_layer = {'weights': tf.Variable(tf.random_normal([125, 1])),
                'biases': tf.Variable(tf.random_normal([1]))}

class ImageNN():
    def train(self, array, target):
        x = tf.placeholder('float', name='x')
        l1 = tf.add(tf.matmul(x, hidden_1_layer['weights']), hidden_1_layer['biases'])
        l1 = tf.nn.relu(l1)
        l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
        l2 = tf.nn.relu(l2)
        l3 = tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])
        l3 = tf.nn.relu(l3)
        output = tf.add(tf.matmul(l3, output_layer['weights']), output_layer['biases'])
        output = tf.nn.sigmoid(output)
        cost = tf.square(output - target)
        optimizer = tf.train.AdamOptimizer().minimize(cost)
        array = array.reshape(1, 37500)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(optimizer, feed_dict={x: array})
            sess.close()
        del x, l1, l2, output, cost, optimizer

    # Do computations with our artificial neural network
    def predict(self, data):  # input data is of size (37500,)
        x = tf.placeholder('float', name='x')  # just a placeholder for getting the data into the right rank (dimensions); it holds no values
        l1 = tf.add(tf.matmul(x, hidden_1_layer['weights']), hidden_1_layer['biases'])
        l1 = tf.nn.relu(l1)
        l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
        l2 = tf.nn.relu(l2)
        l3 = tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])
        l3 = tf.nn.relu(l3)
        output = tf.add(tf.matmul(l3, output_layer['weights']), output_layer['biases'])
        output = tf.nn.sigmoid(output)
        data = data.reshape(1, 37500)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            theOutput = sess.run(output, feed_dict={x: data})
            sess.close()
        del x, l1, l2, output, data
        return theOutput
python tensorflow machine-learning neural-network
asked Nov 24 '18 at 2:23 by dav rod
1) Try training a simple working MLP first (github.com/aymericdamien/TensorFlow-Examples/blob/master/…); this proves that your environment is set up correctly. 2) Bump up the input size in the simple MLP to something comparable to your actual input size; this tests whether your hardware is capable of the number crunching you expect of it. If training is consistently slow, then it's just a matter of upgrading the hardware. If training is not that slow, then most likely there are bugs in your code.
– teng
Nov 24 '18 at 2:37
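A minimal sanity check along the lines teng suggests could look like the sketch below. It uses the same TF 1.x APIs as the question's code; the hidden size, the sigmoid cross-entropy loss, the synthetic data, and the step count are illustrative assumptions, not taken from the question or the linked example.

import numpy as np
import tensorflow as tf

input_size = 37500   # raise or lower this to probe what the hardware can handle
hidden_size = 500

# Graph is built once, outside the training loop
x = tf.placeholder(tf.float32, [None, input_size], name='x')
y = tf.placeholder(tf.float32, [None, 1], name='y')

w1 = tf.Variable(tf.random_normal([input_size, hidden_size], stddev=0.01))
b1 = tf.Variable(tf.zeros([hidden_size]))
w2 = tf.Variable(tf.random_normal([hidden_size, 1], stddev=0.01))
b2 = tf.Variable(tf.zeros([1]))

hidden = tf.nn.relu(tf.matmul(x, w1) + b1)
logits = tf.matmul(hidden, w2) + b2
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits))
train_op = tf.train.AdamOptimizer().minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    data = np.random.rand(4, input_size).astype(np.float32)       # synthetic inputs
    labels = np.random.randint(0, 2, size=(4, 1)).astype(np.float32)
    for step in range(60):
        _, loss_val = sess.run([train_op, loss], feed_dict={x: data, y: labels})
        print(step, loss_val)  # each step should take roughly the same time

If every one of the 60 steps takes roughly the same wall-clock time on your machine, the hardware can handle the arithmetic and the slowdown is coming from the code itself.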
1 Answer
It sounds like a memory issue. You're not deleting l3 or array in your train method, or l3 in your predict method. I don't think that's the cause, though, since the Python interpreter should throw those away anyway.
How are you calling this class? It might be that you're holding onto the output in memory and it's getting very large.
answered Nov 24 '18 at 2:36 by David
I'm calling the class like so: NN = ImageNN(), and later NN.train(array=trainingArray, target=1) and NN.predict(a). What do you mean by holding onto the output in memory?
– dav rod
Nov 24 '18 at 2:42
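For what it's worth, one likely contributor to the slowdown, beyond anything the caller holds onto: in TF 1.x, every call to train() or predict() as written adds a new placeholder, new matmul ops, and (in train) a fresh Adam optimizer with its own slot variables to the default graph, and running tf.global_variables_initializer() on each call re-randomizes all the weights, so the graph keeps growing while the training never actually accumulates. Below is a sketch of one way to restructure the class so the graph and Session are built once and reused; it keeps the question's module-level layer dictionaries, while the placeholder shapes, the reduce_mean cost, and the target placeholder are assumptions for illustration rather than anything from the original post.

import numpy as np
import tensorflow as tf

class ImageNN(object):
    def __init__(self):
        # Build the graph exactly once, reusing the layer dictionaries
        # (hidden_1_layer, ..., output_layer) defined at module level in the question.
        self.x = tf.placeholder(tf.float32, [None, 37500], name='x')
        self.target = tf.placeholder(tf.float32, [None, 1], name='target')

        l1 = tf.nn.relu(tf.matmul(self.x, hidden_1_layer['weights']) + hidden_1_layer['biases'])
        l2 = tf.nn.relu(tf.matmul(l1, hidden_2_layer['weights']) + hidden_2_layer['biases'])
        l3 = tf.nn.relu(tf.matmul(l2, hidden_3_layer['weights']) + hidden_3_layer['biases'])
        self.output = tf.nn.sigmoid(tf.matmul(l3, output_layer['weights']) + output_layer['biases'])

        cost = tf.reduce_mean(tf.square(self.output - self.target))
        self.train_op = tf.train.AdamOptimizer().minimize(cost)

        # One Session for the lifetime of the object; variables are
        # initialized once, so training accumulates across calls.
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())

    def train(self, array, target):
        self.sess.run(self.train_op,
                      feed_dict={self.x: array.reshape(1, 37500),
                                 self.target: np.array([[target]], dtype=np.float32)})

    def predict(self, data):  # input data is of size (37500,)
        return self.sess.run(self.output, feed_dict={self.x: data.reshape(1, 37500)})

With this structure, NN = ImageNN() builds everything up front, and repeated NN.train(...) and NN.predict(...) calls should take roughly constant time per call.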