TransWikia.com

ValueError: Input 0 of layer sequential is incompatible with the layer: : expected min_ndim=4, found ndim=2. Full shape received: [None, 1]

Stack Overflow Asked by Swati on February 10, 2021

    file_list = []
    class_list = []
    
    DATADIR = "C://Users//SB//Python_Programs//Image_Classifications//Data"
    
    # All the categories you want your neural network to detect
    CATEGORIES = ["DealBills", "RX"]
    
    # The size of the images that your neural network will use
    IMG_SIZE = 299
    
    # Checking or all images in the data folder
    for category in CATEGORIES :
        path = os.path.join(DATADIR, category)
        for img in os.listdir(path):
            img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
                    
    
    training_data = []
    
    def create_training_data():
        for category in CATEGORIES :
            path = os.path.join(DATADIR, category)
            class_num = CATEGORIES.index(category)
            for img in os.listdir(path):
                try :
                    img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
                    new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
                    training_data.append([new_array, class_num])
                except Exception as e:
                    pass
    
    create_training_data()
    
    random.shuffle(training_data)
    
    X = [] #features
    y = [] #labels
    
    for features, label in training_data:
        X.append(features)
        y.append(label)
        
    
    X = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
    
    # Creating the files containing all the information about your model
    pickle_out = open("X.pickle", "wb")
    pickle.dump(X, pickle_out)
    pickle_out.close()
    
    pickle_out = open("y.pickle", "wb")
    pickle.dump(y, pickle_out)
    pickle_out.close()
    
    pickle_in = open("X.pickle", "rb")
    X = pickle.load(pickle_in)
    
    # Opening the files about data
    X = pickle.load(open("X.pickle", "rb"))
    y = pickle.load(open("y.pickle", "rb"))
    
    # normalizing data (a pixel goes from 0 to 255)
    X = X/255.0
    
    # Building the model
    model = Sequential()
    # 3 convolutional layers
    #model.add(Conv2D(32, (3, 3), input_shape = X.shape[1:]))
    model.add(Conv2D(32, (3, 3), input_shape = (299,299,1)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2,2)))
    
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2,2)))
    
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2,2)))
    model.add(Dropout(0.25))
    
    # 2 hidden layers
    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation("relu"))
    
    model.add(Dense(128))
    model.add(Activation("relu"))
    
    # The output layer with 13 neurons, for 13 classes
    model.add(Dense(13))
    model.add(Activation("softmax"))
    
    # Compiling the model using some basic parameters
    model.compile(loss="sparse_categorical_crossentropy",
                    optimizer="adam",
                    metrics=["accuracy"])
    
    y = np.array(y)
    #X = np.array(X)
    
    #X = X.reshape(-1,IMG_SIZE,IMG_SIZE,1)
       
    # Training the model, with 40 iterations
    # validation_split corresponds to the percentage of images used for the validation phase compared to all the images
    history = model.fit(X, y, batch_size=10, epochs=20, validation_split=0.1)
    
    # Saving the model
    model_json = model.to_json()
    with open("model.json", "w") as json_file :
        json_file.write(model_json)
    
    model.save_weights("model.h5")
    print("Saved model to disk")
    
    model.save('CNN.model')
    
    ###
    # Printing a graph showing the accuracy changes during the training phase
    #print(history.history.keys())
    #plt.figure(1)
    #plt.plot(history.history['acc'])
    #plt.plot(history.history['val_acc'])
    #plt.title('model accuracy')
    #plt.ylabel('accuracy')
    #plt.xlabel('epoch')
    #plt.legend(['train', 'validation'], loc='upper left')
    ###
    
    CATEGORIES = ["DealBills", "RX"]
    def prepare(file):
        IMG_SIZE = 299
        img_array = cv2.imread(file, cv2.IMREAD_GRAYSCALE)
        new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
        return new_array.reshape(-1, IMG_SIZE, IMG_SIZE, 1)
    model = tf.keras.models.load_model("CNN.model")
    
    
    image = 'C://Users//SB//Python_Programs//Image_Classifications//Datasets//Images//DealBill//42979929_25.jpg' #your image path
    
    prediction = model.predict([image])
    prediction = list(prediction[0])
    print(CATEGORIES[prediction.index(max(prediction))])

Query – Please find above code. I am getting error

"ValueError: Input 0 of layer sequential is incompatible with the layer: : expected min_ndim=4, found ndim=2. Full shape received: [None, 1]" for line prediction = model.predict([image])

Can anybody please help, I am trying to build model for Document images like different forms, invoices to classify #different documents.

Add your own answers!

Ask a Question

Get help from others!

© 2024 TransWikia.com. All rights reserved. Sites we Love: PCI Database, UKBizDB, Menu Kuliner, Sharing RPP