
Question

https://www.chegg.com/homework-help/questions-and-answers/import-keras-import-numpy-np-matplotlib-import-pyplot-plt-import-os-download-dataset-sklea-q111133942

At the line

#Train the model
history = model.fit(X_train, y_train, validation_split=0.1, epochs=10, batch_size=32, callbacks=[early_stopping])

I get the following error:

Epoch 1/10
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-13-cdba15d1180c> in <module>
      1 #train the model, use callbacks
----> 2 model.fit(X_train, y_train, validation_split=0.1, epochs=10, batch_size=32, callbacks=[early_stopping])

1 frames
/usr/local/lib/python3.9/dist-packages/keras/engine/training.py in tf__train_function(iterator)
     13     try:
     14         do_return = True
---> 15         retval_ = ag__.converted_call(ag__.ld(step_function), (ag__.ld(self), ag__.ld(iterator)), None, fscope)
     16     except:
     17         do_return = False

ValueError: in user code:

    File "/usr/local/lib/python3.9/dist-packages/keras/engine/training.py", line 1249, in train_function *
        return step_function(self, iterator)
    File "/usr/local/lib/python3.9/dist-packages/keras/engine/training.py", line 1233, in step_function **
        outputs = model.distribute_strategy.run(run_step, args=(data,))
    File "/usr/local/lib/python3.9/dist-packages/keras/engine/training.py", line 1222, in run_step **
        outputs = model.train_step(data)
    File "/usr/local/lib/python3.9/dist-packages/keras/engine/training.py", line 1023, in train_step
        y_pred = self(x, training=True)
    File "/usr/local/lib/python3.9/dist-packages/keras/utils/traceback_utils.py", line 70, in error_handler
        raise e.with_traceback(filtered_tb) from None
    File "/usr/local/lib/python3.9/dist-packages/keras/engine/input_spec.py", line 295, in assert_input_compatibility
        raise ValueError(

    ValueError: Input 0 of layer "sequential" is incompatible with the layer: expected shape=(None, 28, 28), found shape=(None, 1850)

Please help me as soon as possible. My full code is below.

#Importing Libraries
import keras
import numpy as np
from matplotlib import pyplot as plt
import os

# Download dataset
from sklearn import datasets
lfw_people = datasets.fetch_lfw_people(min_faces_per_person=70, resize=0.4)
n_samples, h, w = lfw_people.images.shape
print(n_samples, h, w)

plt.imshow(lfw_people.images[6], cmap='gray')
plt.show()
print(lfw_people.target[6], lfw_people.target_names[lfw_people.target[6]])

#Data Preprocessing
X = lfw_people.data
y = lfw_people.target
n_features = X.shape[1]
target_names = lfw_people.target_names
y = keras.utils.to_categorical(y)
y
n_classes = y.shape[1]

#Splitting the data into train and test sets
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=1234)
X_train = X_train / 255
X_test = X_test / 255
(X_train.shape, X_test.shape, y_train.shape, y_test.shape)

# training dataset
print('Training dataset:')
print(X_train.shape)
print(X_train.shape)
print(y_train.shape)

# testing dataset
print('Testing dataset:')
print(X_test.shape)
print(X_test.shape)
print(y_test.shape)

#Define the model
from keras.layers import Conv2D, MaxPooling2D, Activation, Dropout, Flatten, Dense, Reshape
from keras.models import Sequential

#Use callbacks
from keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(patience=10, monitor="val_loss", verbose=1)

#define the model, do not forget to reshape the input
model = Sequential()
model.add(Reshape((28,28,1), input_shape=(28,28)))
model.add(Conv2D(filters=32, kernel_size=(5,5), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(filters=64, kernel_size=(5,5), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Flatten())
# add Dense layers
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
print(model.summary())

#Compile the model
model.compile(loss='categorical_crossentropy', optimizer="adam", metrics=['categorical_accuracy'])

#Train the model
history = model.fit(X_train, y_train, validation_split=0.1, epochs=10, batch_size=32, callbacks=[early_stopping])

#Evaluate the model
test_loss, test_acc = model.evaluate(X_test, y_test)
print('Test accuracy:', test_acc)

#Predict the model
predictions = model.predict(X_test)

#Plot the accuracy and loss graph
plt.plot(history.history['categorical_accuracy'])
plt.plot(history.history['val_categorical_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()

plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
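For context on where the two shapes in the error come from: fetch_lfw_people(min_faces_per_person=70, resize=0.4) returns 50 x 37 images, so lfw_people.data holds 50 * 37 = 1850 flattened features per sample and the dataset has 7 classes, while the model above is built for a (28, 28) input and 10 output classes. The snippet below is a minimal sketch of a model definition sized to the actual LFW shapes; it hard-codes h = 50, w = 37 and n_classes = 7 for illustration (in the code above these come from lfw_people.images.shape and y.shape), assumes X stays flattened as in the question, and is an illustrative adjustment rather than a verified solution.

# Sketch: size the network to the LFW data loaded above
# (assumption: X is still the flattened lfw_people.data array).
from keras.models import Sequential
from keras.layers import Reshape, Conv2D, MaxPooling2D, Flatten, Dense, Dropout

h, w, n_classes = 50, 37, 7   # shapes reported by fetch_lfw_people(min_faces_per_person=70, resize=0.4)

model = Sequential()
model.add(Reshape((h, w, 1), input_shape=(h * w,)))   # (None, 1850) -> (None, 50, 37, 1)
model.add(Conv2D(filters=32, kernel_size=(5, 5), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(filters=64, kernel_size=(5, 5), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(n_classes, activation='softmax'))     # 7 LFW classes instead of the hard-coded 10

With the first layer matched to the 1850-feature input, the compile, fit, evaluate, and plotting code from the question should run unchanged.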
