OiO.lk Community platform!

Oio.lk is an excellent forum for developers, providing a wide range of resources, discussions, and support for those in the developer community. Join oio.lk today to connect with like-minded professionals, share insights, and stay updated on the latest trends and technologies in the development field.
  You need to log in or register to access the solved answers to this problem.
  • You have reached the maximum number of guest views allowed
  • Please register below to remove this limitation

Agent won't learn no matter what I try

  • Thread starter: Dawn of Justize
  • Start date: (not shown)
D

Dawn of Justize

Guest
I am trying to build a reinforcement-learning (RL) model, and the environment is fairly simple: a 10x10 grid. During the training phase, the agent reaches the goal. But when I try to test it, the agent picks one direction, goes straight, and hits a wall or obstacle. It won't change direction, and this repeats in a loop. My reward function is based on the distance from the current position to the goal, with penalties for moving away from the goal and for hitting anything. Below is my model class.

Code:
class DQNTrainer:
    """Build, train, save and load a keras-rl DQN agent for a square grid world.

    Parameters
    ----------
    states : int
        Side length of the square (states x states) observation grid.
    actions : int
        Number of discrete actions available to the agent.
    visualize : bool
        Whether to render the environment during training.
    load_model : bool
        If True, restore previously saved weights before building the agent.
    """

    # Default weights file; kept as the original hard-coded name for
    # backward compatibility.
    DEFAULT_WEIGHTS = 'pathplanner.h5'

    def __init__(self, states, actions, visualize=True, load_model=False):
        self.states = states
        self.actions = actions
        self.model = self.create_model()
        if load_model:
            self.load_model()
        self.agent = self.create_agent()
        self.env = None
        self.visualize = visualize

    def create_model(self):
        """Build the Q-network: flattened grid -> 3 ReLU layers -> linear Q-values.

        FIX: the original applied l1(0.01) to every hidden layer. That is an
        extremely strong L1 penalty for a small DQN: it drives most weights to
        exactly zero, so the network collapses to near-constant Q-values and
        the (near-)greedy test-time policy picks the same action forever --
        exactly the "walks straight into a wall" symptom. Regularization is
        rarely needed for a DQN of this size; if over-fitting is actually
        observed, prefer a tiny value (e.g. l1(1e-6)) instead of 0.01.
        """
        model = Sequential()
        # window_length=1 in SequentialMemory -> observations arrive as
        # (1, states, states); flatten them for the dense layers.
        model.add(Flatten(input_shape=(1, self.states, self.states)))
        model.add(Dense(128, activation="relu"))
        model.add(Dense(128, activation="relu"))
        model.add(Dense(64, activation="relu"))
        # Linear head: one unbounded Q-value per action.
        model.add(Dense(self.actions, activation="linear"))
        return model

    def create_agent(self):
        """Wrap the Q-network in a DQNAgent with an annealed eps-greedy policy."""
        # Anneal exploration from 1.0 down to 0.1 over the first 10k training
        # steps; value_test=0.01 keeps a little stochasticity during test() so
        # the agent is not fully deterministic there.
        policy = LinearAnnealedPolicy(EpsGreedyQPolicy(),
                                      attr='eps',
                                      value_max=1.0,
                                      value_min=0.1,
                                      value_test=0.01,
                                      nb_steps=10000)
        return DQNAgent(
            model=self.model,
            memory=SequentialMemory(limit=20000, window_length=1),
            policy=policy,
            nb_actions=self.actions,
            nb_steps_warmup=500,        # fill replay memory before learning
            target_model_update=0.005,  # soft target-network updates (tau < 1)
            gamma=0.99,
        )

    def compile_agent(self):
        """Compile the agent; must be called before fit()/test()."""
        self.agent.compile(optimizer=Adam(learning_rate=0.0005),
                           metrics=['mean_absolute_error'])

    def fit(self, env, nb_steps):
        """Train the agent on `env` for `nb_steps` environment steps."""
        self.env = env
        self.agent.fit(self.env, nb_steps=nb_steps,
                       visualize=self.visualize, verbose=1)

    def train(self, env, nb_steps):
        """Convenience wrapper: compile, then fit. Returns True on completion."""
        self.compile_agent()
        self.fit(env, nb_steps)
        return True

    def save_model(self, filepath=DEFAULT_WEIGHTS):
        """Save network weights to `filepath` (default kept for compatibility)."""
        self.model.save_weights(filepath, overwrite=True)

    def load_model(self, filepath=DEFAULT_WEIGHTS):
        """Restore network weights; a missing/corrupt file is reported, not fatal."""
        try:
            self.model.load_weights(filepath)
            print("Model Loading Successful")
        except Exception as e:
            # Best-effort load: keep the freshly initialized weights on failure.
            print(f"Error loading model: {e}")
<p>I am trying to build a reinforcement-learning (RL) model, and the environment is fairly simple: a 10x10 grid. During the training phase, the agent reaches the goal. But when I try to test it, the agent picks one direction, goes straight, and hits a wall or obstacle. It won't change direction, and this repeats in a loop. My reward function is based on the distance from the current position to the goal, with penalties for moving away from the goal and for hitting anything. Below is my model class.</p>
<pre><code>class DQNTrainer:
def __init__(self, states, actions, visualize=True, load_model=False):
self.states = states
self.actions = actions
self.model = self.create_model()
if load_model:
self.load_model()
self.agent = self.create_agent()
self.env = None
self.visualize = visualize

def create_model(self):
model = Sequential()
model.add(Flatten(input_shape=(1, self.states, self.states)))
model.add(Dense(128, activation="relu", kernel_regularizer=l1(0.01))) # Add L1 regularization
model.add(Dense(128, activation="relu", kernel_regularizer=l1(0.01)))
model.add(Dense(64, activation="relu", kernel_regularizer=l1(0.01)))
model.add(Dense(self.actions, activation="linear"))
return model

def create_agent(self):
policy = LinearAnnealedPolicy(EpsGreedyQPolicy(),
attr='eps',
value_max=1.0,
value_min=0.1,
value_test=0.01,
nb_steps=10000)
return DQNAgent(
model=self.model,
memory=SequentialMemory(limit=20000, window_length=1),
policy=policy,
#policy = EpsGreedyQPolicy(0.1),
nb_actions=self.actions,
nb_steps_warmup=500,
target_model_update=0.005,
gamma=0.99
)

def compile_agent(self):
self.agent.compile(optimizer=Adam(learning_rate=0.0005),
metrics=['mean_absolute_error'])

def fit(self, env, nb_steps):
self.env = env
self.agent.fit(self.env, nb_steps=nb_steps, visualize=self.visualize, verbose=1)

def train(self, env, nb_steps):
self.compile_agent()
self.fit(env, nb_steps)
return True

def save_model(self):
self.model.save_weights('pathplanner.h5', overwrite=True)

def load_model(self):
try:
self.model.load_weights('pathplanner.h5')
print("Model Loading Successful")
except Exception as e:
print(f"Error loading model: {e}")
</code></pre>
 

Latest posts

D
Replies
0
Views
1
Dhanushka Amarakoon
D
S
Replies
0
Views
1
Shikhar Ambashta
S
Top