cartpool.py
# This is an implementation of CartPole-v1 in the Gym environment
import gym
import numpy as np
import keras
import lumusu.Agents.BasicAgent as Agent
import os
EPISODES = 400
if __name__ == '__main__':
    env = gym.make('CartPole-v1')
    state_size = env.observation_space.shape[0]
    action_size = env.action_space.n
    agent = Agent.CartpoolAgent_(state_size, action_size)
    done = False
    batchsize = 32
    # To save or load the network weights, set the corresponding variable to True
    saveWeights = True
    loadWeights = False
    if loadWeights:
        cachedFolder = "savedweights/"
        agent.load(cachedFolder + "cartpole-dqn.h5")
    for episode in range(EPISODES):
        state = env.reset()
        state = np.reshape(state, [1, state_size])
        for time in range(500):
            env.render()
            action = agent.play(state)  # play a move based on the current state
            next_state, reward, done, _ = env.step(action)
            reward = reward if not done else -10  # reward is -10 when we lose the game
            next_state = np.reshape(next_state, [1, state_size])
            agent.remember(state, action, reward, next_state, done)
            state = next_state
            if done:
                print("Episode: {}/{}, Score: {}, epsilon: {:.2}".format(episode, EPISODES, time, agent.epsilon))
                break
            if len(agent.memory) > batchsize:
                agent.replayMemory(batchsize)
    if saveWeights:
        cachedFolder = "savedweights/"
        os.makedirs(cachedFolder, exist_ok=True)
        agent.save(cachedFolder + "cartpole-dqn.h5")
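

# The class below is NOT part of the lumusu package; it is only a minimal sketch of the
# agent interface this script assumes (play, remember, replayMemory, load, save, memory,
# epsilon), shown for illustration. The real agent lives in
# lumusu.Agents.BasicAgent.CartpoolAgent_ and may be implemented differently.
# Names such as SketchDQNAgent, the layer sizes, and the hyperparameters are assumptions.
import random
from collections import deque
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam


class SketchDQNAgent:
    """Hypothetical epsilon-greedy DQN agent with experience replay."""

    def __init__(self, state_size, action_size):
        self.state_size = state_size
        self.action_size = action_size
        self.memory = deque(maxlen=2000)   # replay buffer queried via len(agent.memory)
        self.gamma = 0.95                  # discount factor
        self.epsilon = 1.0                 # exploration rate printed by the training loop
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.995
        self.model = Sequential([
            Dense(24, input_dim=state_size, activation='relu'),
            Dense(24, activation='relu'),
            Dense(action_size, activation='linear'),
        ])
        self.model.compile(loss='mse', optimizer=Adam(0.001))

    def play(self, state):
        # Epsilon-greedy action selection
        if np.random.rand() <= self.epsilon:
            return random.randrange(self.action_size)
        return int(np.argmax(self.model.predict(state)[0]))

    def remember(self, state, action, reward, next_state, done):
        self.memory.append((state, action, reward, next_state, done))

    def replayMemory(self, batch_size):
        # Train on a random minibatch sampled from the replay buffer
        minibatch = random.sample(self.memory, batch_size)
        for state, action, reward, next_state, done in minibatch:
            target = reward
            if not done:
                target = reward + self.gamma * np.amax(self.model.predict(next_state)[0])
            target_f = self.model.predict(state)
            target_f[0][action] = target
            self.model.fit(state, target_f, epochs=1, verbose=0)
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay

    def load(self, name):
        self.model.load_weights(name)

    def save(self, name):
        self.model.save_weights(name)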