Amount of data required for a good ANN model
I am new to deep learning and have started with ANNs. I have a dataset with 15 parameters and 2,100 rows. The r2_score with multiple linear regression and random forest models is around 85%, but when I try an ANN on this dataset the r2_score is much lower, around 32%. Is it because the number of rows is small? If so, roughly how many rows are needed for an ANN? I have added the code for reference.
import pandas as pd
dataset = pd.read_csv('chiller-2_runningdata_withcommon_parameters.csv')
dataset = dataset.drop(['DateTime', 'delta', 'KWH', 'RunStatus', 'OP Hours'], axis=1)
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 15:16].values

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
sc_Y = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
y_train = sc_Y.fit_transform(y_train)

# Importing the Keras libraries and packages
import keras
from keras.models import Sequential
from keras.layers import Dense

# Initialising the ANN
classifier = Sequential()

# Adding the input layer and the first hidden layer
classifier.add(Dense(units=8, kernel_initializer='uniform', activation='relu', input_dim=15))

# Adding the second hidden layer
classifier.add(Dense(units=8, kernel_initializer='uniform', activation='relu'))

# Adding the output layer
classifier.add(Dense(units=1, kernel_initializer='uniform'))

# Compiling the ANN
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Fitting the ANN to the Training set
classifier.fit(X_train, y_train, batch_size=32, epochs=500)

# Predicting the Test set results
y_pred = classifier.predict(X_test)
y_pred = sc_Y.inverse_transform(y_pred)
y_pred_train = classifier.predict(X_train)
y_pred_train = sc_Y.inverse_transform(y_pred_train)
y_train = sc_Y.inverse_transform(y_train)

# Calculate r2_score
from sklearn.metrics import r2_score
score_train = r2_score(y_pred_train, y_train)
score_test = r2_score(y_pred, y_test)
machine-learning neural-network dataset regression
asked yesterday, edited yesterday by Chinni (83)
2 Answers
You are using a classification loss function for a regression task. This makes a huge difference, because classification error and regression error measure different things.

A brief example: if you had to predict the demand for a commodity in the coming month, it is very unlikely that you would be exactly right; you would be close, but not exact. A classification loss only credits exact matches: predict the value exactly and you are counted as correct, but come close without hitting it exactly and you are counted as completely wrong. A classification metric would therefore judge a model that is consistently close to the truth as performing poorly. A regression loss, by contrast, rewards being as close to the target as possible. This is why MSE is preferred over cross-entropy for regression: it matches the intuition that near misses are better than far misses.
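To make this concrete, here is a minimal sketch in plain NumPy (the demand values are invented purely for illustration) contrasting an exact-match view of error with a squared-error view:

import numpy as np

# Hypothetical demand forecasts: always close to the truth, never exact.
y_true = np.array([100.0, 250.0, 80.0, 310.0])
y_pred = np.array([98.0, 255.0, 79.0, 305.0])

# A classification-style view only credits exact matches:
exact_match = np.mean(y_pred == y_true)   # 0.0 -- "everything wrong"

# A regression loss measures how close the predictions are:
mse = np.mean((y_true - y_pred) ** 2)     # 13.75 -- small on this scale

print(exact_match, mse)

The exact-match score calls this model useless even though every prediction is within 2% of the truth; the squared error reflects how good it actually is.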
I would suggest digging deeper into the loss functions used for regression and classification (my example is admittedly naive); that will help a lot. I hope this makes sense.
Cheers!
answered 1 hour ago by Savinay (212) · New contributor
You seem to be using a loss function for classification problems, binary cross-entropy, on a regression problem.

Try changing your compile call to something like

model.compile(optimizer='adam', loss='mse', metrics=['mae'])

Also, you can change your fit call to

model.fit(X_train, y_train, epochs=500, validation_data=(X_test, sc_Y.transform(y_test)), verbose=2)

to track the validation error during training (y_test is scaled here to match y_train, which was transformed with sc_Y; validating against the unscaled values would make the validation loss meaningless). This can help you detect overfitting and adjust the number of epochs accordingly.

Don't worry about the amount of data; focus on understanding the difference between classification and regression problems instead.
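Putting these fixes together, a minimal end-to-end sketch might look like the following (column names, split, and layer sizes are taken from the question; the two 8-unit hidden layers are just one reasonable choice, not the only one). Note also that r2_score expects the true values as its first argument, which the original code had reversed:

import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import r2_score
from keras.models import Sequential
from keras.layers import Dense

dataset = pd.read_csv('chiller-2_runningdata_withcommon_parameters.csv')
dataset = dataset.drop(['DateTime', 'delta', 'KWH', 'RunStatus', 'OP Hours'], axis=1)
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 15:16].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

# Scale inputs and target; the test target is scaled too, so the
# validation loss during training is comparable to the training loss.
sc_X, sc_y = StandardScaler(), StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
y_train_s = sc_y.fit_transform(y_train)
y_test_s = sc_y.transform(y_test)

# A regressor, not a classifier: linear output unit, MSE loss.
model = Sequential()
model.add(Dense(units=8, activation='relu', input_dim=15))
model.add(Dense(units=8, activation='relu'))
model.add(Dense(units=1))  # linear activation for the regression output
model.compile(optimizer='adam', loss='mse', metrics=['mae'])
model.fit(X_train, y_train_s, batch_size=32, epochs=500,
          validation_data=(X_test, y_test_s), verbose=2)

# Evaluate on the original scale; r2_score takes (y_true, y_pred).
y_pred = sc_y.inverse_transform(model.predict(X_test))
print(r2_score(y_test, y_pred))

The kernel_initializer='uniform' arguments were dropped so the layers use Keras's default Glorot initialization, which generally works well with ReLU networks.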
answered 19 hours ago by 1b15 (12) · New contributor