OiO.lk Community platform!


How can I fix my face-recognition model error?

  • Thread starter: MAHMOUD JAMEEL ATTA DAASAN (Guest)
I have this code for a face-recognition model. I keep getting an error when I try to start training it, probably because of a problem related to the triplet loss and the anchor, and I can't seem to fix it.

Code:
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from tensorflow.keras.applications.imagenet_utils import preprocess_input
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Flatten
from tensorflow.keras.applications import InceptionResNetV2
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping

# Function to preprocess images
def preprocess_image(image_path, target_size=(160, 160)):
    img = load_img(image_path, target_size=target_size)
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)  # Add a new dimension for batch size
    img = preprocess_input(img)
    return img

# Custom triplet loss function
def triplet_loss(y_true, y_pred, alpha=0.2):
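    # Push the anchor-positive distance to be smaller than the
    # anchor-negative distance by at least the margin `alpha`.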
    anchor, positive, negative = y_pred[:, 0], y_pred[:, 1], y_pred[:, 2]
    pos_dist = tf.reduce_sum(tf.square(anchor - positive), axis=-1)
    neg_dist = tf.reduce_sum(tf.square(anchor - negative), axis=-1)
    basic_loss = pos_dist - neg_dist + alpha
    loss = tf.reduce_sum(tf.maximum(basic_loss, 0.0))
    return loss

# Create FaceNet model
def create_facenet_model():
    base_model = InceptionResNetV2(include_top=False, input_shape=(160, 160, 3))
    x = Flatten()(base_model.output)
    x = Dense(128, activation='relu')(x)
    model = Model(inputs=base_model.input, outputs=x)
    return model

# Load images from dataset
def load_dataset(dataset_path):
    images, labels = [], []
    label_map = {}
    label_counter = 0

    for person_name in os.listdir(dataset_path):
        person_path = os.path.join(dataset_path, person_name)
        if os.path.isdir(person_path):
            for image_name in os.listdir(person_path):
                image_path = os.path.join(person_path, image_name)
                img = preprocess_image(image_path)
                images.append(img)
                if person_name not in label_map:
                    label_map[person_name] = label_counter
                    label_counter += 1
                labels.append(label_map[person_name])

    return np.vstack(images), np.array(labels), label_map

# Prepare training data for triplet loss
def create_triplet_data(images, labels):
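    # For each image (the anchor), take the first other image of the same person
    # as the positive and the first image of any other person as the negative.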
    triplets = []
    for i in range(len(images)):
        anchor = images[i]
        positive_index = np.where(labels == labels[i])[0]
        positive_index = positive_index[positive_index != i]
        if len(positive_index) == 0:
            continue
        positive = images[positive_index[0]]
        negative_index = np.where(labels != labels[i])[0]
        negative = images[negative_index[0]]
        triplets.append([anchor, positive, negative])
    return np.array(triplets)

# Training loop function
def train_model(dataset_path, epochs=100, batch_size=32):
    # Load the dataset
    images, labels, label_map = load_dataset(dataset_path)

    # Create triplet data
    triplet_data = create_triplet_data(images, labels)

    # Prepare data
    anchors = np.vstack([triplet[0] for triplet in triplet_data])
    positives = np.vstack([triplet[1] for triplet in triplet_data])
    negatives = np.vstack([triplet[2] for triplet in triplet_data])

    # Create and compile the model
    facenet_model = create_facenet_model()
    optimizer = Adam()

    # Model checkpoint to save the best model
    checkpoint = ModelCheckpoint('facenet_model.keras', monitor='loss', save_best_only=True, mode='min')

    # Early stopping to stop training if no improvement in loss after 10 epochs
    early_stopping = EarlyStopping(monitor='loss', patience=10, mode='min', restore_best_weights=True)

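    # Manual training loop: embed each triplet batch, compute the triplet loss,
    # and apply gradients with a GradientTape instead of using model.fit().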
    for epoch in range(epochs):
        print(f"Epoch {epoch + 1}/{epochs}")
        losses = []
        for i in range(0, len(anchors), batch_size):
            anchor_batch = anchors[i:i + batch_size]
            positive_batch = positives[i:i + batch_size]
            negative_batch = negatives[i:i + batch_size]

            # Check if the current batch is full
            if len(anchor_batch) != batch_size:
                continue

            # Convert to tensors
            anchor_batch = tf.convert_to_tensor(anchor_batch)
            positive_batch = tf.convert_to_tensor(positive_batch)
            negative_batch = tf.convert_to_tensor(negative_batch)

            with tf.GradientTape() as tape:
                anchor_embeddings = facenet_model(anchor_batch, training=True)
                positive_embeddings = facenet_model(positive_batch, training=True)
                negative_embeddings = facenet_model(negative_batch, training=True)

                y_pred = tf.stack([anchor_embeddings, positive_embeddings, negative_embeddings], axis=1)
                loss = triplet_loss(None, y_pred)
                losses.append(loss)

            grads = tape.gradient(loss, facenet_model.trainable_weights)
            optimizer.apply_gradients(zip(grads, facenet_model.trainable_weights))

        avg_loss = np.mean(losses)
        print(f"Loss: {avg_loss}")

        # Checkpoint and early stopping
        checkpoint.on_epoch_end(epoch, logs={'loss': avg_loss})
        early_stopping.on_epoch_end(epoch, logs={'loss': avg_loss})
        if early_stopping.stopped_epoch > 0:
            break

    # Save the trained model
    facenet_model.save('facenet_model_final.h5')

# Define the dataset path
dataset_path = 'training'  # Adjust this to your dataset path

# Train the model
train_model(dataset_path)

This is my code, and this is the error I receive:

2024-06-22 20:41:04.726598: I tensorflow/core/util/port.cc:113] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable TF_ENABLE_ONEDNN_OPTS=0.
2024-06-22 20:41:05.097851: I tensorflow/core/util/port.cc:113] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable TF_ENABLE_ONEDNN_OPTS=0.
2024-06-22 20:41:07.215652: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: AVX2 AVX_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
WARNING:tensorflow:From C:\Users\User\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.11_qbz5n2kfra8p0\LocalCache\local-packages\Python311\site-packages\keras\src\backend\tensorflow\core.py:184: The name tf.placeholder is deprecated. Please use tf.compat.v1.placeholder instead.

Epoch 1/100
Traceback (most recent call last):
  File "c:\Users\User\Desktop\WORK\master project codes\Custom model creation Face recognition# Datacollection & trainning.py", line 140, in
    train_model(dataset_path)
  File "c:\Users\User\Desktop\WORK\master project codes\Custom model creation Face recognition# Datacollection & trainning.py", line 113, in train_model
    anchor_embeddings = facenet_model(anchor_batch, training=True)
                        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\User\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.11_qbz5n2kfra8p0\LocalCache\local-packages\Python311\site-packages\keras\src\utils\traceback_utils.py", line 122, in error_handler
    raise e.with_traceback(filtered_tb) from None
  File "C:\Users\User\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.11_qbz5n2kfra8p0\LocalCache\local-packages\Python311\site-packages\keras\src\layers\input_spec.py", line 245, in assert_input_compatibility
    raise ValueError(
ValueError: Input 0 of layer "functional_1" is incompatible with the layer: expected shape=(None, 160, 160, 3), found shape=(32, 160, 3)
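
For reference, here is a small standalone snippet (not from my script, just my attempt to understand the shape in the error, assuming each image is a single (160, 160, 3) array like the entries collected in create_triplet_data) showing how np.vstack and np.stack treat a list of such images differently:

Code:
import numpy as np

# Thirty-two dummy "images", each (160, 160, 3), standing in for the
# per-triplet entries built in create_triplet_data.
imgs = [np.zeros((160, 160, 3), dtype=np.float32) for _ in range(32)]

batched = np.stack(imgs)     # shape (32, 160, 160, 3) - a proper batch
flattened = np.vstack(imgs)  # shape (5120, 160, 3)    - rows concatenated, no batch axis

print(batched.shape)    # (32, 160, 160, 3)
print(flattened.shape)  # (5120, 160, 3)

Slicing the flattened version in chunks of 32 gives arrays of shape (32, 160, 3), which matches the "found shape" in the error above.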

I want to be able to train my model for face recognition so that in the future I can build bigger neural models that are faster and more accurate.