reinforcement-learning #1

Merged
rodude123 merged 4 commits from reinforcement-learning into master 2023-09-28 23:59:04 +01:00
20 changed files with 1663 additions and 1495 deletions
Showing only changes of commit 5b253369ee

6
.idea/other.xml Normal file
View File

@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="PySciProjectComponent">
<option name="PY_SCI_VIEW_SUGGESTED" value="true" />
</component>
</project>

100
changeInRewards-3.txt Normal file
View File

@ -0,0 +1,100 @@
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0

100
changeInRewards-5.txt Normal file
View File

@ -0,0 +1,100 @@
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0

126
main.py
View File

@ -14,21 +14,45 @@ WIN = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("Draughts")
def getRowColFromMouse(pos):
def getRowColFromMouse(pos: tuple) -> tuple:
"""
Gets the row and column from the mouse position
:param pos: X and Y position of the mouse
:return: Row and column
"""
x, y = pos
row = y // SQUARE_SIZE
col = x // SQUARE_SIZE
return row, col
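# Illustrative worked example: SQUARE_SIZE is WIDTH // COLS = 800 // 8 = 100,
# so a click at pixel (350, 120) maps to row 120 // 100 = 1 and
# column 350 // 100 = 3:
#   getRowColFromMouse((350, 120))  # -> (1, 3)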
def drawText(text, font, color, surface, x, y):
textobj = font.render(text, 1, color)
def drawText(text: str, font: pygame.font.Font, colour: tuple, surface: pygame.Surface, x: int, y: int) -> None:
"""
Draws text on the screen
:param text: Text to draw
:param font: System font
:param colour: Colour of the text
:param surface: The display surface
:param x: X position of the text
:param y: Y position of the text
:return: None
"""
textobj = font.render(text, 1, colour)
textrect = textobj.get_rect()
textrect.topleft = (x, y)
surface.blit(textobj, textrect)
def drawMultiLineText(surface, text, pos, font, color=pygame.Color('black')):
def drawMultiLineText(surface: pygame.Surface, text: str, pos: tuple, font: pygame.font.Font, colour: pygame.Color = pygame.Color('black')) -> None:
"""
Draws multiline text on the screen
:param surface: the display surface
:param text: text to draw
:param pos: X and Y position of the text
:param font: System font
:param colour: colour of the text
:return: None
"""
words = [word.split(' ') for word in text.splitlines()] # 2D array where each row is a list of words.
space = font.size(' ')[0] # The width of a space.
max_width, max_height = surface.get_size()
@ -36,7 +60,7 @@ def drawMultiLineText(surface, text, pos, font, color=pygame.Color('black')):
word_height = None
for line in words:
for word in line:
word_surface = font.render(word, 0, color)
word_surface = font.render(word, 0, colour)
word_width, word_height = word_surface.get_size()
if x + word_width >= max_width:
x = pos[0] # Reset the x.
@ -47,7 +71,12 @@ def drawMultiLineText(surface, text, pos, font, color=pygame.Color('black')):
y += word_height # Start on new row.
def main(difficulty=0):
def main(difficulty: int = 0) -> None:
"""
Main function, that shows the menu before running the game
:param difficulty: difficulty of minimax
:return: None
"""
pygame.init()
screen = pygame.display.set_mode((WIDTH, HEIGHT))
menuClock = pygame.time.Clock()
@ -114,7 +143,11 @@ def main(difficulty=0):
game(difficulty)
def rulesGUI():
def rulesGUI() -> None:
"""
Shows the rules of the game
:return: None
"""
screen = pygame.display.set_mode((WIDTH, HEIGHT))
menuClock = pygame.time.Clock()
click = False
@ -174,17 +207,21 @@ multi-jump until the next move.""", (50, 50), font)
menuClock.tick(60)
def game(difficulty):
def game(difficulty: int) -> None:
"""
Runs the game with the given difficulty. Used for training and testing the RL algorithm
:param difficulty: The difficulty of the minimax algorithm
"""
run = True
clock = pygame.time.Clock()
gameManager = GameManager(WIN, GREEN)
rl = ReinforcementLearning(gameManager.board.getAllMoves(WHITE), gameManager.board, WHITE, gameManager)
model = rl.buildMainModel()
model.load_weights("./modelWeights/model_final.h5")
# model = rl.buildMainModel()
rl.model.load_weights("./modelWeights/model_final.h5")
mm = MiniMax()
totalReward = []
winners = []
for i in range(100):
for i in range(50):
score = 0
for j in range(200):
print(j)
@ -194,22 +231,12 @@ def game(difficulty):
# mm = MiniMax()
# value, newBoard = mm.AI(difficulty, WHITE, gameManager)
# gameManager.aiMove(newBoard)
# reward, newBoard = rl.AI(gameManager.board)
actionSpace = rl.encodeMoves(WHITE, gameManager.board)
if len(actionSpace) == 0:
# reward, newBoard = rl.AITrain(gameManager.board)
newBoard = rl.AITest(gameManager.board)
if newBoard is None:
print("Cannot make move")
continue
totalMoves = len(actionSpace)
# moves = np.squeeze(moves)
moves = np.pad(actionSpace, (0, rl.maxSize - totalMoves), 'constant', constant_values=(1, 1))
act_values = model.predict(rl.normalise(moves))
val = np.argmax(act_values[0])
val = val if val < totalMoves else totalMoves - 1
reward, newBoard, done = gameManager.board.step(actionSpace[val], WHITE)
# if newBoard is None:
# print("Cannot make move")
# continue
gameManager.aiMove(newBoard)
gameManager.update()
@ -223,8 +250,8 @@ def game(difficulty):
if gameManager.winner() is not None:
print("Green" if gameManager.winner() == GREEN else "White", " wins")
with open("winners.txt", "a+") as f:
f.write(str(gameManager.winner()) + "\n")
# with open(f"winners-{difficulty}.txt", "a+") as f:
# f.write(str(gameManager.winner()) + "\n")
winners.append(gameManager.winner())
break
@ -241,34 +268,55 @@ def game(difficulty):
pygame.display.update()
if gameManager.winner() is None:
with open("winners.txt", "a+") as f:
f.write(str(0) + "\n")
# with open(f"winners-{difficulty}.txt", "a+") as f:
# f.write(str(0) + "\n")
winners.append(0)
gameManager.reset()
rl.resetScore()
print("Game: ", i, " Reward: ", score)
with open("rewards.txt", "a+") as f:
f.write(str(score) + "\n")
# with open(f"rewards-{difficulty}.txt", "a+") as f:
# f.write(str(score) + "\n")
totalReward.append(score)
# save model weights every 250 games
if i % 250 == 0 and i != 0:
rl.model.save("./modelWeights/model_" + str(i) + ".h5")
# if i % 250 == 0 and i != 0:
# rl.model.save("./modelWeights/model_" + str(i) + ".h5")
# pygame.quit()
rl.model.save("./modelWeights/model_final.h5")
# rl.model.save("./modelWeights/model_final.h5")
change_in_rewards = [0] # Initialize with 0 for the first episode
for i in range(1, len(totalReward)):
change_in_reward = totalReward[i] - totalReward[i - 1]
change_in_rewards.append(change_in_reward)
plt.plot([i for i in range(len(totalReward))], totalReward)
plt.xlabel("Games")
plt.ylabel("Reward")
plt.show()
# with open(f"changeInRewards-{difficulty}.txt", "a+") as f:
# for i in change_in_rewards:
# f.write(str(i) + "\n")
# episodes = list(range(1, len(totalReward) + 1))
#
# plt.plot(episodes, change_in_rewards)
# plt.xlabel('Training Games')
# plt.ylabel('Change in Game Reward')
# plt.title('Change in Game Reward vs. Training Games')
# plt.grid(True)
# plt.show()
#
# plt.plot([i for i in range(len(totalReward))], totalReward)
# plt.xlabel("Games")
# plt.ylabel("Reward")
# plt.show()
fig, ax = plt.subplots()
bar = ax.bar(["Draw", "White", "Green"], [winners.count(0), winners.count(WHITE), winners.count(GREEN)])
ax.set(xlabel='Winner', ylabel='Frequency', ylim=[0, 500])
ax.set_title("Winners")
ax.set_title(f"Winners for difficulty — {difficulty}")
ax.bar_label(bar)
plt.show()
# difficulties = [3, 5, 7, 9]
#
# for diff in difficulties:
# main(diff)
main(3)
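For reference, a minimal way to run the sweep that the commented-out block above describes:

# Hypothetical re-enabling of the commented-out difficulty sweep.
for diff in [3, 5, 7, 9]:
    main(diff)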

View File

@ -2,11 +2,19 @@ import random
from math import inf
from utilities.constants import GREEN, WHITE
from utilities.gameManager import GameManager
class MiniMax:
def AI(self, depth, maxPlayer, gameManager):
def AI(self, depth: int, maxPlayer: int, gameManager: GameManager) -> tuple:
"""
The minimax algorithm
:param depth: How deep the algorithm should go
:param maxPlayer: The current player
:param gameManager: The game manager
:return: the best evaluation and board
"""
if depth == 0 or gameManager.board.winner() is not None:
return gameManager.board.scoreOfTheBoard(), gameManager.board
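The hunk above shows only the base case. Below is a minimal standalone sketch of the recursive step the docstring describes, using only Board methods visible elsewhere in this diff (getAllMoves returns the boards reachable in one move, scoreOfTheBoard returns whiteLeft - greenLeft); the function name and shape are assumptions, not the project's implementation.

from math import inf
from utilities.constants import GREEN, WHITE

def minimaxSketch(board, depth: int, maximising: bool) -> tuple:
    # Base case, as in the hunk above.
    if depth == 0 or board.winner() is not None:
        return board.scoreOfTheBoard(), board
    colour = WHITE if maximising else GREEN
    bestScore, bestBoard = (-inf, None) if maximising else (inf, None)
    for child in board.getAllMoves(colour):
        score, _ = minimaxSketch(child, depth - 1, not maximising)
        if (maximising and score > bestScore) or (not maximising and score < bestScore):
            bestScore, bestBoard = score, child
    # bestBoard stays None if the side to move has no legal moves.
    return bestScore, bestBoard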

Binary file not shown.

Binary file not shown.

View File

@ -22,8 +22,8 @@ class ReinforcementLearning():
def __init__(self, actionSpace: list, board: Board, colour: int, gameManager: GameManager) -> None:
"""
Constructor for the ReinforcementLearning class
:param actionSpace: the number of possible actions
:param board: the game board
:param actionSpace: The number of possible actions
:param board: The game board
"""
self.gameManager = gameManager
self.actionSpace = actionSpace
@ -33,7 +33,7 @@ class ReinforcementLearning():
self.score = 0
self.epsilon = 1
self.gamma = .95
self.batchSize = 256
self.batchSize = 512
self.maxSize = 32
self.epsilonMin = .01
self.epsilonDecay = .995
@ -42,10 +42,10 @@ class ReinforcementLearning():
self.model = self.buildMainModel()
print(self.model.summary())
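# Illustrative arithmetic (assuming epsilon decays once per replay): with
# epsilonDecay = .995 and epsilonMin = .01, exploration reaches its floor
# after roughly log(0.01) / log(0.995), i.e. about 919 decays, since
# 0.995 ** 919 ≈ 0.01.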
def AI(self, board: Board) -> tuple:
def AITrain(self, board: Board) -> tuple:
"""
Learns to play the draughts game
:return: the loss
:param board: The board
:return: The score and the next state
"""
self.board = board
self.state = self._convertState(self.board.board)
@ -62,10 +62,29 @@ class ReinforcementLearning():
return self.score, nextState
def AITest(self, board: Board) -> Board:
"""
Runs the AI
:param board: The board
:return: The new board
"""
actionSpace = self.encodeMoves(WHITE, board)
if len(actionSpace) == 0:
print("Cannot make move")
return None
totalMoves = len(actionSpace)
# moves = np.squeeze(moves)
moves = np.pad(actionSpace, (0, self.maxSize - totalMoves), 'constant', constant_values=(1, 1))
act_values = self.model.predict(self.normalise(moves))
val = np.argmax(act_values[0])
val = val if val < totalMoves else totalMoves - 1
reward, newBoard, done = board.step(actionSpace[val], WHITE)
return newBoard
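# Illustrative worked example of the padding above: with maxSize = 32 and two
# encoded moves, the action space is right-padded with 1s to a fixed length,
# and an out-of-range argmax is clamped to the last real move:
#   np.pad([1234, 5678], (0, 30), 'constant', constant_values=(1, 1))
#   # -> length-32 array: [1234, 5678, 1, 1, ..., 1]
# normalise() then divides by 10000, mapping each 4-digit encoded move into (0, 1).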
def buildMainModel(self) -> Sequential:
"""
Build the model for the AI
:return: the model
:return: The model
"""
# Board model
modelLayers = [
@ -93,7 +112,7 @@ class ReinforcementLearning():
def _replay(self) -> None:
"""
trains the model
:return: None (void)
:return: None
"""
if len(self.memory) < self.batchSize:
# Not enough data to replay and test the model
@ -132,19 +151,19 @@ class ReinforcementLearning():
def _remember(self, state: np.array, action: int, reward: float, nextState: np.array, done: bool) -> None:
"""
Remembers what it has learnt
:param state: the current state
:param action: the action taken
:param reward: the reward for the action
:param nextState: the next state
:param done: whether the game is finished
:return: None (void)
:param state: The current state
:param action: The action taken
:param reward: The reward for the action
:param nextState: The next state
:param done: Whether the game is finished
:return: None
"""
self.memory.append((state, action, reward, nextState, done))
def _act(self) -> Any:
"""
Chooses an action based on the available moves
:return: the action
:return: The action
"""
if np.random.rand() <= self.epsilon:
# choose a random action from the action space
@ -159,12 +178,16 @@ class ReinforcementLearning():
return self.actionSpace[0]
encodedMoves = np.squeeze(self.actionSpace)
encodedMoves = np.pad(encodedMoves, (0, self.maxSize - len(encodedMoves)), 'constant', constant_values=(1, 1))
act_values = self.model.predict(self.normalise(encodedMoves))
val = np.argmax(act_values[0])
actValues = self.model.predict(self.normalise(encodedMoves))
val = np.argmax(actValues[0])
val = val if val < len(self.actionSpace) else len(self.actionSpace) - 1
return self.actionSpace[val]
def resetScore(self):
def resetScore(self) -> None:
"""
Resets the score
:return: None
"""
self.score = 0
def _convertState(self, board: list) -> list:
@ -195,9 +218,9 @@ class ReinforcementLearning():
def _encode(self, start: tuple, end: tuple) -> int:
"""
Encodes the move into an integer
:param start: tuple of start position
:param end: tuple of end position
:return: encoded move
:param start: Tuple of start position
:param end: Tuple of end position
:return: Encoded move
"""
start_row = start[0]
start_col = end[0]
@ -209,6 +232,10 @@ class ReinforcementLearning():
return int(str(start_row) + str(start_col) + str(end_row) + str(end_col))
def _maxNextQ(self) -> float:
"""
Calculates the max Q value for the next state
:return: the max Q value
"""
colour = WHITE if self.colour == GREEN else GREEN
encodedMoves = self.encodeMoves(colour, self.board)
if len(encodedMoves) == 0:
@ -220,9 +247,9 @@ class ReinforcementLearning():
def encodeMoves(self, colour: int, board: Board) -> list:
"""
Encodes the moves into a list of encoded moves
:param colour: colour of the player
:param board: the board
:return: list of encoded moves
:param colour: Colour of the player
:param board: The board
:return: List of encoded moves
"""
encodedMoves = []
moves = board.getAllMoves(colour)
@ -231,15 +258,23 @@ class ReinforcementLearning():
encodedMoves.append(self._encode(where[0]+1, where[1]+1))
return encodedMoves
def _boardDiff(self, board, move):
def _boardDiff(self, board: Board, move: Board) -> np.array:
"""
Finds the difference between the two boards
:param board: The current board
:param move: The new board
:return: the difference between the two boards
"""
cnvState = np.array(self._convertState(board.board))
cnvMove = np.array(self._convertState(move.board))
diff = np.subtract(cnvMove, cnvState)
diff = np.nonzero(diff)
return diff
def normalise(self, data):
def normalise(self, data: np.array) -> np.array:
"""
Normalise the data
:param data: the data to normalise
:return: normalised data
"""
return data / 10000
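_replay's body is not shown in this hunk. Below is a compact sketch of the standard DQN-style update it presumably performs, using only attributes visible above (the (state, action, reward, nextState, done) tuples stored by _remember, batchSize, gamma and the epsilon schedule); calling _maxNextQ per sample is a simplification, not the project's exact logic.

import random

def replaySketch(rl: ReinforcementLearning) -> None:
    # Illustrative only.
    if len(rl.memory) < rl.batchSize:
        return  # not enough data to replay and test the model
    batch = random.sample(list(rl.memory), rl.batchSize)
    for state, action, reward, nextState, done in batch:
        # Bellman target: immediate reward plus the discounted best next Q.
        target = reward if done else reward + rl.gamma * rl._maxNextQ()
        # ...fit rl.model towards `target` for the chosen action...
    if rl.epsilon > rl.epsilonMin:
        rl.epsilon *= rl.epsilonDecay  # decay exploration towards the floor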

View File

@ -1,27 +1,80 @@
import matplotlib.pyplot as plt
import numpy as np
from utilities.constants import GREEN, WHITE
# winners = []
with open("winners.txt") as f:
with open("winners-5.txt", "r") as f:
winners = f.readlines()
winners = [int(x.strip()) for x in winners]
# lavg = []
# for i in range(0, len(winners), 25):
# lavg.append(winners[i:i+25].count(2) / 25)
#
# x = np.arange(0, len(lavg))
# y = np.array(lavg) * 100
#
# a, b = np.polyfit(x, y, 1)
#
# fig, ax = plt.subplots(figsize=(10, 5))
# ax.plot(y)
# ax.set_xticks(np.arange(0, len(lavg), 2))
# ax.minorticks_on()
# ax.plot(x, a*x+b, color='red', linestyle='--', linewidth=2)
# ax.set_ylim([0, 100])
# ax.set_title("Winners Average")
# ax.grid(which='major', linestyle='-', linewidth='0.5', color='black')
# ax.grid(which='minor', linestyle=':', linewidth='0.5')
# ax.set_xlabel("Average Set")
# ax.set_ylabel("Percentage of Wins")
# ax.tick_params(which="minor", bottom=False, left=False)
# plt.show()
fig, ax = plt.subplots()
bar = ax.bar(["Draw", "White", "Green"], [winners.count(0), winners.count(WHITE), winners.count(GREEN)])
ax.set(xlabel='Winner', ylabel='Frequency', ylim=[0, 500])
ax.set_title("Winners")
ax.set(xlabel='Winner', ylabel='Frequency', ylim=[0, 100])
ax.set_title("Winners at Depth 5")
ax.grid(which='major', linestyle='-', linewidth='0.5', color='grey', axis='y')
ax.bar_label(bar)
plt.show()
# with open("trainedRewards.txt", "r") as f:
# totalReward = f.readlines()
#
# totalReward = [float(x.strip()) for x in totalReward]
# filteredReward = list(filter(lambda x: x > -1500, totalReward))
with open("rewardsA.txt") as f:
totalReward = f.readlines()
totalReward = [float(x.strip()) for x in totalReward]
# change_in_rewards = [0] # Initialize with 0 for the first episode
# for i in range(1, len(totalReward)):
# change_in_reward = totalReward[i] - totalReward[i - 1]
# change_in_rewards.append(change_in_reward)
#
# games = list(range(1, len(totalReward) + 1))
plt.plot([i for i in range(len(totalReward))], totalReward)
plt.xlabel("Games")
plt.ylabel("Reward")
plt.show()
# plt.plot(games, change_in_rewards)
# plt.xlabel('Training Games')
# plt.ylabel('Change in Game Reward')
# plt.title('Change in Game Reward vs. Training Games')
# plt.grid(True)
# plt.show()
# major_ticks = np.arange(0, 101, 20)
# minor_ticks = np.arange(0, 101, 5)
#
# plt.plot([i for i in range(len(totalReward))], totalReward)
# plt.title("Rewards to Games")
# plt.xlabel("Games")
# plt.ylabel("Reward")
# plt.xticks(major_ticks)
# plt.xticks(minor_ticks, minor=True)
# plt.yticks(major_ticks)
# plt.yticks(minor_ticks, minor=True)
# plt.grid(which='both')
# plt.show()
#
# plt.plot([i for i in range(len(filteredReward))], filteredReward)
# plt.title("Filtered Rewards to Games")
# plt.xlabel("Games")
# plt.ylabel("Reward")
# plt.grid(which='both')
# plt.show()

56
rewards-5.txt Normal file
View File

@ -0,0 +1,56 @@
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0

View File

@ -1,511 +0,0 @@
300.5
132.5
-20.89999999999999
160.90000000000003
83.0
-1619.300000000002
203.0000000000002
180.3
-76.10000000000002
207.60000000000008
234.6000000000001
95.89999999999996
198.10000000000005
-57.29999999999997
-121.5
27.0
133.49999999999997
-47.5
-3638.4999999999956
265.5
289.5
259.9000000000001
-7972.5
230.5
220.5
160.0
135.0
330.5
282.5
320.5
195.5
149.0
207.5
246.5
166.5
234.0
262.5
240.0
199.0
150.0
274.0
175.5
200.5
113.0
206.5
-323.0
116.5
270.0
60.0
203.0
78.0
275.0
-262.5
311.5
-18.0
-362.0
188.0
-4712.5
179.5
-75.0
-2201.5
-276.0
95.5
20.0
237.5
244.0
160.0
-1834.5
-54.5
102.5
-5285.0
150.5
137.5
-32.0
208.0
168.0
192.5
205.5
273.0
335.0
150.5
-2837.0
205.5
-182.5
138.0
22.5
316.5
170.0
72.5
149.0
137.5
110.5
137.5
25.5
112.5
96.5
-1635.0
-203.0
-239.5
240.0
203.0
112.5
115.5
309.0
227.5
-5185.0
-48.0
108.0
154.0
300.0
73.0
194.5
163.0
190.0
106.5
123.0
260.0
192.5
250.0
248.0
133.0
195.0
-4912.5
124.0
-53.0
138.0
185.5
-40.0
-116.5
256.00000000000017
97.5
77.5
197.5
94.5
135.5
-99.5
237.5
127.5
-420.0
170.0
100.0
150.5
-145.0
-65.0
136.5
188.5
-150.0
-275.0
-6480.0
188.0
217.5
121.5
213.0
15.5
-140.0
195.5
130.5
38.5
85.5
-5195.0
207.5
155.0
151.5
132.5
314.0
197.5
149.0
107.5
214.0
205.5
-5377.5
-1379.5
87.0
165.0
-92.5
281.5
238.0
162.5
158.0
200.5
-5247.5
-1065.0
383.0
164.0
-202.0
137.5
3.0
125.0
189.0
189.0
118.0
147.5
-5992.5
137.5
248.0
95.0
147.5
16.5
200.0
245.5
-73.0
172.5
219.0
-80.0
-40.0
131.5
110.0
45.0
174.0
211.5
-22.0
128.0
-5545.0
117.5
194.0
148.0
350.5
-399.5
219.0
177.5
105.0
142.5
215.0
127.5
-942.0
116.5
-5062.5
219.0
135.0
234.0
-1395.0
-47.5
273.0
225.0
207.0
180.5
140.0
205.5
165.0
202.5
-401.5
-1605.0
137.5
119.0
342.5
178.0
122.5
-92.0
-6.5
55.5
223.0
-1506.0
231.5
225.5
319.5
-52.5
101.5
222.5
176.5
165.5
-50.5
164.0
250.0
170.5
172.5
202.5
268.0
177.5
129.0
-13.5
-149.5
221.5
-5887.5
-367.5
120.5
245.5
176.5
193.0
274.0
-877.5
192.5
-114.0
173.0
285.0
59.5
227.5
238.0
-327.5
121.0
192.5
122.5
-5857.5
140.0
47.0
244.0
352.5
151.5
209.0
140.0
248.0
147.5
-17.5
-55.5
188.0
351.5
205.5
227.5
75.5
275.5
244.0
-6337.5
206.5
120.0
120.5
62.5
-110.0
22.5
362.5
197.5
-151.5
-5575.0
-2010.5
-5147.5
-405.5
222.0
173.0
193.0
380.5
268.0
152.5
211.5
-497.0
257.5
-76.0
178.0
-49.5
-101.0
255.5
225.0
337.5
251.5
174.5
-5635.0
94.0
115.0
-6140.0
-6875.0
193.0
-1403.5
21.5
135.5
-5210.0
-6030.0
93.0
401.5
341.5
-175.0
151.5
-49.0
-5655.0
277.5
-6127.5
90.0
145.0
173.0
-47.0
143.0
225.5
180.0
296.5
64.0
175.0
122.5
145.0
-2247.5
195.0
-294.5
188.0
170.5
-1730.0
217.5
-206.5
-6455.0
166.5
205.5
122.5
269.0
-5850.0
29.0
-110.0
129.0
197.5
-5835.0
105.5
163.0
93.0
198.0
192.5
-544.5
110.5
133.0
145.0
85.0
40.5
-295.0
275.0
-273.5
-275.0
-151.0
253.0
70.5
213.0
-1262.5
-166.0
-694.5
137.5
139.0
265.0
115.5
140.5
-9.0
217.5
135.0
188.0
103.0
312.5
363.0
107.5
120.5
10.5
-5020.0
220.0
245.0
192.5
48.0
95.5
-117.0
245.5
262.5
115.0
155.0
47.5
177.40000000000003
354.0
-2835.0
-155.0
194.0
138.0
241.5
180.5
-332.5
-5125.0
-5075.0
163.0
79.0
180.5
185.5
103.0
-460.0
270.5
235.5
163.0
315.0
255.0
113.0
175.0
50.0
110.0
323.0
317.5
-64.0
263.0
242.5
147.5
208.0
-62.0
31.5
-5622.5
128.0
321.5
163.0
255.5
109.0
-5800.0
-5177.5
188.0
220.5
237.5
-27.5
238.0
270.5
209.0
200.0
127.5
90.0
210.0
-117.5
237.5
225.0
280.0
216.20000000000005
-189.5
5.0
-86.80000000000005
-266.19999999999993
11.0
5.5
21.5
27.0
-72.5
9.9

View File

@ -1,500 +0,0 @@
18.5
18.8
275.6
-1.5
-149.0
71.70000000000027
-1462.0
266.3000000000002
154.40000000000003
229.19999999999996
-947.9000000000011
186.60000000000005
-181.09999999999985
94.89999999999993
120.39999999999995
-242.09999999999994
-4030.0
57.100000000000264
107.10000000000001
216.5
-187.5
167.90000000000003
181.40000000000003
-62.5
105.5
132.5
-115.0
-5192.5
100.5
295.5
-335.5
245.0
178.0
295.0
203.0
209.0
135.0
239.0
302.5
135.5
260.0
12.0
252.5
273.0
154.0
130.5
195.5
-1349.5
133.0
187.5
227.0
-783.5
223.0
-372.5
188.0
175.5
210.0
-5047.5
-53.0
191.5
156.5
134.0
210.0
206.5
177.5
302.5
-5577.5
109.0
210.0
319.0
200.0
232.5
-275.0
-5812.5
297.5
127.5
178.0
320.0
165.5
-87.5
175.5
60.0
113.0
342.5
-185.5
-50.0
261.5
265.5
106.5
240.0
268.0
-290.0
140.0
164.0
63.0
84.0
166.5
144.0
237.5
98.0
103.5
115.5
163.0
301.5
99.0
212.5
122.5
155.0
298.0
160.0
111.5
248.0
150.5
180.5
141.5
-75.0
225.5
107.5
220.5
160.5
-5420.0
-99.5
155.5
250.0
-155.5
-321.5
188.0
135.5
143.0
322.5
170.5
125.5
202.5
140.5
-393.5
105.0
163.0
134.0
157.5
158.0
185.5
-5630.0
125.5
190.5
202.5
279.0
152.5
223.0
155.0
198.0
174.0
-321.0
310.5
110.5
191.5
-664.5
151.5
178.0
235.5
202.5
214.0
-23.0
-155.0
267.5
199.0
93.0
225.0
-5925.0
145.0
120.0
283.0
37.5
80.0
350.0
148.0
-60.5
177.5
-82.5
-647.5
167.5
121.5
216.5
291.5
290.0
133.0
-25.5
210.5
-970.0
288.0
288.0
162.5
-932.5
9.0
-37.0
0.0
310.0
-97.0
-49.0
75.0
4.5
-32.5
-66.5
-108.5
147.5
200.5
262.5
127.5
-1756.0
-7.0
14.0
160.0
148.0
284.0
182.5
221.5
227.5
132.5
-5637.5
-4980.0
290.5
-423.5
240.0
-557.5
-130.0
-339.5
310.0
137.5
105.5
157.5
-6457.5
230.5
103.0
-5075.0
133.5
189.0
222.5
157.5
113.0
-635.0
8.0
112.5
-17.0
197.0
110.0
247.5
191.5
180.0
-2.5
233.0
137.5
129.0
168.0
158.0
252.5
-456.0
113.0
280.0
159.0
-5617.5
288.0
141.5
95.5
195.5
177.5
97.5
165.0
109.0
149.0
124.0
245.5
78.0
158.0
147.5
-99.5
209.0
238.0
95.0
-27.5
185.0
100.5
228.0
-1695.0
265.0
288.0
-164.5
117.5
160.0
-2533.5
-755.0
-212.0
270.0
-455.0
-325.0
-212.0
97.5
100.0
170.0
95.0
133.0
305.5
-47.5
128.0
148.0
-5637.5
267.5
279.0
170.5
35.0
143.0
131.5
230.0
-102.0
120.5
100.5
240.0
225.0
-862.5
94.5
150.5
55.5
176.5
-17.0
275.0
-15.0
170.5
-1231.0
165.0
-73.5
245.0
118.0
129.0
236.5
-372.5
-25.0
142.5
-195.0
210.0
8.0
113.0
260.0
23.0
132.5
110.5
151.5
140.5
247.5
188.0
229.0
362.5
249.0
-6527.5
-115.0
115.5
-5650.0
149.0
243.70000000000013
-837.0
185.5
218.0
28.5
250.0
197.5
218.0
47.5
177.5
-3872.0
155.0
-142.0
-20.0
107.5
-4294.5
338.0
160.0
123.0
-72.0
158.0
125.5
-5392.5
90.0
240.0
-52.5
95.5
-5055.0
283.0
130.5
240.5
128.0
375.0
250.5
208.0
-97.0
-2302.0
189.0
-6160.0
110.0
-65.5
208.0
230.5
328.0
167.5
-75.5
-5710.0
204.0
110.0
66.5
208.0
160.0
-283.0
125.0
293.0
140.0
162.5
-5.0
140.0
272.5
231.5
-1350.0
-124.5
270.0
189.0
234.0
385.5
303.09999999999957
168.0
187.5
133.0
308.0
234.0
140.0
17.5
160.0
80.0
188.0
159.0
-76.5
-52.0
225.5
160.0
22.5
-312.0
174.0
182.5
-5587.5
157.5
260.5
188.5
252.5
-5012.5
162.0
220.5
408.0
310.5
128.0
108.0
-321.0
130.0
-382.0
-6447.5
154.0
-67.5
245.0
277.5
-5155.0
210.5
-300.0
148.0
153.0
-191.0
95.5
20.0
145.5
197.5
-210.0
215.0
-50.0
177.5
298.0
-328.5
-61.0
204.0
138.0
130.0
-242.5
248.0
-5020.0
-6022.5
-4588.5
132.5
182.5
-117.5
-190.0
222.5
-5312.5
-5290.0
95.5
-177.5
230.5
135.0
157.5
221.40000000000006

500
trainedRewards.txt Normal file
View File

@ -0,0 +1,500 @@
180.5
115.19999999999999
-155.39999999999998
-5169.4000000000015
100.0
-3354.2999999999956
123.79999999999998
-1738.0
261.40000000000015
120.89999999999999
147.80000000000004
108.0
113.50000000000001
110.5000000000002
-1048.3000000000006
75.8
232.70000000000016
89.10000000000001
279.9000000000002
165.40000000000003
85.4
34.20000000000016
266.20000000000016
101.69999999999999
283.0
-264.5
225.0
328.0
215.5
150.0
-217.5
-2920.0
82.5
-208.5
150.5
196.5
223.0
265.5
-282.5
175.5
206.5
221.5
127.5
-6337.5
147.5
231.5
137.5
-180.5
108.0
-339.5
190.0
-69.0
52.5
58.0
-5575.0
-159.5
197.5
177.5
-5547.5
-65.5
136.5
292.5
-169.5
185.0
115.5
198.0
30.0
162.5
95.5
170.0
113.0
-1405.0
-27.0
-4832.199999999999
147.5
228.0
59.0
262.5
-220.0
150.5
177.5
140.0
123.0
119.0
137.5
134.0
175.5
-5598.5
46.5
135.0
205.0
186.5
177.5
120.1
332.5
162.5
122.5
262.5
-70.0
159.0
138.0
240.5
215.0
147.5
-118.0
260.5
199.0
130.0
265.0
142.5
230.0
135.0
197.5
-179.5
198.0
288.0
200.5
-222.5
165.5
139.0
228.0
211.5
197.5
102.5
233.0
95.5
-129.0
187.5
158.0
295.0
240.5
-222.5
-1841.5
198.0
113.0
305.0
-482.5
125.5
215.0
110.0
-180.0
170.0
-62.5
215.5
132.5
187.5
135.0
-65.0
138.0
-1972.0
240.5
-237.5
610.0
267.5
52.5
-211.5
217.5
88.0
305.5
165.5
115.0
182.5
-69.5
333.0
363.0
112.5
-15.5
150.5
118.0
-52.5
318.0
174.0
198.0
-5705.0
160.5
155.0
125.0
165.0
259.0
165.5
155.0
-236.0
220.5
-15.5
117.5
367.5
237.5
255.0
85.0
-5342.5
141.5
-3582.5
-600.0
915.5
179.0
190.0
-47.5
275.5
-5.0
195.0
128.0
146.5
750.5
153.0
-5157.5
-279.5
219.0
154.0
153.0
-234.5
248.0
182.5
122.5
155.5
1078.0
102.5
358.0
152.5
261.5
239.0
128.0
111.5
93.0
310.5
-87.0
158.0
113.0
165.5
120.0
256.5
90.5
245.0
159.0
160.0
-5272.0
-88.5
159.0
169.0
147.5
-1149.5
-372.0
-270.0
95.0
142.5
212.5
154.0
425.0
153.0
213.0
280.5
-80.5
-45.90000000000003
-2250.5
123.50000000000003
149.40000000000006
219.0
108.0
180.0
271.19999999999993
202.5
121.8000000000001
47.599999999999966
-35.0
281.5
307.5
99.80000000000001
154.0
166.30000000000004
271.5
205.5
145.5
265.0
113.0
144.0
88.0
-204.5
204.0
215.0
177.5
168.0
263.0
66.5
258.0
-5477.5
94.5
-139.0
190.5
160.0
-35.5
149.0
100.5
130.0
-40.0
175.0
132.5
107.5
143.0
-5097.5
97.5
-1880.0
-15.0
213.0
-601.0
282.5
276.5
113.0
106.5
-1011.5
128.0
150.0
145.5
233.0
209.0
136.5
240.0
7.5
-1535.0
238.0
185.0
157.5
-1660.0
-15.5
-145.0
178.0
-4997.5
182.5
197.5
355.5
130.0
232.5
-5420.0
190.0
128.0
115.0
2.5
149.0
220.0
-87.0
-447.5
-4122.5
-67.5
-425.0
283.0
925.0
49.5
-15.0
233.0
215.5
234.0
154.0
141.5
226.5
220.0
110.5
270.0
253.0
-1944.0
215.0
250.5
155.0
260.5
185.0
261.5
232.5
177.5
-97.5
-196.0
230.0
205.5
-367.0
265.5
180.0
135.5
139.0
103.0
314.0
192.5
179.0
97.5
52.5
135.0
184.0
-305.0
147.5
206.5
157.5
243.0
-6125.0
257.5
125.60000000000002
190.0
-6225.0
96.5
350.0
193.0
185.5
206.5
223.0
-225.0
117.5
170.0
223.0
175.5
210.0
-222.0
148.60000000000002
-133.0
-193.5
152.5
-152.0
-6245.0
-2.0
50.5
-140.5
185.5
125.5
208.0
-200.0
202.5
112.5
119.0
210.5
-1199.5
-2.5
102.5
0.5
275.5
135.5
-32.5
235.5
-617.0
110.0
222.5
-372.0
-53.0
306.5
117.5
-5095.0
223.0
-257.0
-5760.0
11.5
182.5
160.0
325.5
151.5
-327.5
-5655.0
62.5
-5550.0
195.0
92.5
-5290.0
215.0
59.0
179.0
188.0
-2103.5
253.0
118.0
335.0
85.0
207.5
229.0
152.5
-188.5
-177.5
274.0
220.0
-5169.5
128.0
-1260.0
140.0
147.5
140.0
-505.0
155.0
225.5
188.0
131.5
1111.5
180.0
-4897.5
-687.5
125.0
180.0
111.5
-5582.5
232.5
153.0
-130.5
102.5
189.0
157.5
-5685.0
325.0
-6870.0
-520.5
-3027.0
32.5

View File

@ -1,136 +1,8 @@
2
2
2
2
2
2
2
2
2
2
2
2
2
2
1
1
2
1
1
2
2
2
0
2
2
2
2
2
0
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
1
2
2
2
2
2
2
2
2
0
2
2
2
0
1
1
2
2
2
2
2
2
2
2
2
0
2
2
2
2
2
2
2
2
2
2
2
2
2
2
1
2
2
2
2
2
2
2
2
2
2
1
1
2
2
2
2
2
2
2
0
1
2
2
2
2
0
2
2
2
2
2
2
2
2
2
2
2
2
1
2
2
1
1
2
2
2
2
1
2
2
@ -143,10 +15,6 @@
2
2
2
0
2
2
0
2
2
2
@ -155,9 +23,12 @@
2
2
2
1
2
0
2
2
2
2
2
2
2
2
@ -170,74 +41,20 @@
2
0
2
1
2
2
2
2
2
2
2
0
2
2
2
2
2
2
2
2
2
2
2
0
2
2
2
2
2
2
2
1
2
2
2
1
1
2
2
2
2
2
2
2
0
2
2
2
2
2
2
2
2
2
2
2
2
2
0
2
2
2
2
2
2
2
0
2
2
2
2
2
1
2
2
@ -245,153 +62,12 @@
2
2
2
2
1
2
2
2
2
2
0
2
2
2
2
2
0
2
2
2
2
2
2
2
2
2
2
2
0
2
2
2
2
2
2
2
2
1
2
2
1
2
2
2
0
2
2
0
2
1
2
2
2
2
2
2
2
1
1
2
2
2
2
2
2
2
0
2
2
2
2
2
2
2
2
1
0
1
0
1
0
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
0
0
2
2
0
0
2
2
2
2
0
0
2
2
2
2
2
1
0
2
0
2
2
2
2
2
2
2
2
2
2
2
2
2
0
2
2
2
2
2
1
0
2
2
2
2
0
2
2
2
2
0
2
2
2
2
@ -424,33 +100,11 @@
2
2
2
1
2
2
2
2
0
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
0
0
2
2
2
@ -473,22 +127,53 @@
2
2
2
1
1
2
2
2
2
2
2
2
2
2
2
2
2
2
2
0
2
2
2
2
2
0
0
2
1
1
2
2
2
2
2
2
2
2
2
2
1
2
2
1
2
2
2
0
2
2
2
2
2
2
2
@ -500,6 +185,72 @@
2
2
2
0
2
2
2
0
2
2
1
2
2
2
2
2
0
2
0
2
2
2
2
2
2
2
2
2
0
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
0
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
22
2
2
1
2
1
2
2
@ -507,7 +258,13 @@
2
2
2
0
2
2
2
1
2
2
2
2
2
2
@ -526,6 +283,142 @@
2
2
0
1
1
2
2
1
2
2
2
1
2
2
2
2
0
2
2
2
2
1
2
2
2
2
1
2
2
2
2
2
2
2
2
1
2
2
2
1
1
2
2
0
2
2
2
2
2
0
2
2
2
1
2
2
1
2
2
2
1
2
0
1
2
2
2
2
2
2
2
2
2
2
2
1
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
0
2
2
2
0
2
2
2
2
2
2
2
2
3
2
2
2
2
2
1
2
2
2
0
2
2
1
2
2
2
2
2
2
2
2
2
@ -543,6 +436,7 @@
1
2
2
0
2
2
0
@ -551,6 +445,12 @@
2
2
2
2
0
2
0
2
2
0
2
2
@ -578,18 +478,6 @@
2
2
2
2
2
0
2
2
2
2
2
2
2
2
2
0
2
0
@ -597,14 +485,16 @@
2
2
2
0
2
2
2
2
2
2
2
2
2
1
2
0
2
0
2
0
1
2
2

View File

@ -1,3 +1,5 @@
from __future__ import annotations
import pygame
from copy import deepcopy
from .constants import BLACK, ROWS, GREEN, SQUARE_SIZE, COLS, WHITE
@ -5,20 +7,32 @@ from .piece import Piece
class Board:
def __init__(self):
def __init__(self) -> None:
"""
Constructor for the Board class
:return: None
"""
self.board = []
self.greenLeft = self.whiteLeft = 12
self.greenKings = self.whiteKings = 0
self.green = (144, 184, 59)
self._createBoard()
def _drawSquares(self, win):
def _drawSquares(self, win: pygame.Surface) -> None:
"""
Draws the squares on the board
:param win: The window
"""
win.fill(BLACK)
for row in range(ROWS):
for col in range(row % 2, ROWS, 2):
pygame.draw.rect(win, self.green, (row * SQUARE_SIZE, col * SQUARE_SIZE, SQUARE_SIZE, SQUARE_SIZE))
def _createBoard(self):
def _createBoard(self) -> None:
"""
Creates a board representation of the game
:return: None
"""
for row in range(ROWS):
self.board.append([])
for col in range(COLS):
@ -36,7 +50,12 @@ class Board:
self.board[row].append(0)
def draw(self, win):
def draw(self, win: pygame.Surface) -> None:
"""
Draws the pieces on the board
:param win: The window
:return: None
"""
self._drawSquares(win)
for row in range(ROWS):
for col in range(COLS):
@ -44,7 +63,14 @@ class Board:
if piece != 0:
piece.draw(win)
def move(self, piece, row, col):
def move(self, piece: Piece, row: int, col: int) -> None:
"""
Moves a piece and makes it a king if it reaches the end of the board
:param piece: Piece to move
:param row: Row to move to
:param col: Column to move to
:return: None
"""
self.board[piece.row][piece.col], self.board[row][col] = self.board[row][col], self.board[piece.row][piece.col]
piece.move(row, col)
@ -57,7 +83,11 @@ class Board:
if piece.colour == GREEN:
self.greenKings += 1
def remove(self, skipped):
def remove(self, skipped: tuple) -> None:
"""
Removes a piece from the board
:param skipped: A tuple of the pieces to remove
"""
for piece in skipped:
self.board[piece.row][piece.col] = 0
if piece != 0:
@ -66,7 +96,12 @@ class Board:
continue
self.whiteLeft -= 1
def getAllMoves(self, colour):
def getAllMoves(self, colour: int) -> list:
"""
Gets all the possible moves for a player
:param colour: Colour of the player
:return: List of boards reachable in one move
"""
moves = []
possibleMoves = []
possiblePieces = []
@ -103,14 +138,28 @@ class Board:
return moves
def _simulateMove(self, piece, move, board, skip):
def _simulateMove(self, piece: Piece, move: list, board: Board, skip: tuple) -> Board:
"""
Simulates a move on the board
:param piece: Piece to move
:param move: Move to make
:param board: Board to make the move on
:param skip: Tuple of pieces to skip
:return: Board after the move
"""
board.move(piece, move[0], move[1])
if skip:
board.remove(skip)
return board
def getPiece(self, row, col):
def getPiece(self, row: int, col: int) -> Piece:
"""
Gets a piece from the board
:param row: Row of the piece
:param col: Column of the piece
:return: Piece
"""
return self.board[row][col]
def winner(self):
@ -122,7 +171,12 @@ class Board:
return None
def getValidMoves(self, piece):
def getValidMoves(self, piece: Piece) -> dict:
"""
Gets all the valid moves for a piece
:param piece: Piece to get the moves for
:return: dictionary of moves
"""
moves = {}
forcedCapture = {}
left = piece.col - 1
@ -162,10 +216,19 @@ class Board:
return forcedCapture
def scoreOfTheBoard(self):
def scoreOfTheBoard(self) -> int:
"""
Calculates the score of the board
:return: score of the board
"""
return self.whiteLeft - self.greenLeft
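# Illustrative worked example: with 10 white and 7 green pieces left the
# score is 10 - 7 = +3, so positive scores favour white and negative green.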
def getAllPieces(self, colour):
"""
Gets all the pieces of a player
:param colour: Piece colour
:return: Pieces of the player
"""
pieces = []
for row in self.board:
for piece in row:
@ -173,7 +236,17 @@ class Board:
pieces.append(piece)
return pieces
def _traverseLeft(self, start, stop, step, colour, left, skipped=[]):
def _traverseLeft(self, start: int, stop: int, step: int, colour: int, left: int, skipped: list = []) -> dict:
"""
Traverses the left side of the board
:param start: Start position
:param stop: Stop position
:param step: Step size
:param colour: colour of the player
:param left: Left position
:param skipped: List of pieces to skip
:return: dictionary of moves
"""
moves = {}
last = []
for row in range(start, stop, step):
@ -189,7 +262,17 @@ class Board:
left -= 1
return moves
def _traverseRight(self, start, stop, step, colour, right, skipped=[]):
def _traverseRight(self, start: int, stop: int, step: int, colour: int, right: int, skipped: list = []) -> dict:
"""
Traverses the right side of the board
:param start: Start position
:param stop: Stop position
:param step: Step size
:param colour: colour of the player
:param right: Right position
:param skipped: List of pieces to skip
:return: dictionary of moves
"""
moves = {}
last = []
for row in range(start, stop, step):
@ -207,7 +290,18 @@ class Board:
right += 1
return moves
def _traverse(self, row, col, skipped, moves, step, last, colour):
def _traverse(self, row: int, col: int, skipped: list, moves: dict, step: int, last: list, colour: int) -> list | None:
"""
Traverses the board
:param row: Row to traverse
:param col: Column to traverse
:param skipped: List of pieces to jump
:param moves: Dictionary of moves
:param step: Step size
:param last: List of last pieces
:param colour: Colour of the player
:return: list of last pieces or None
"""
current = self.board[row][col]
if current == 0:
if skipped and not last:
@ -231,7 +325,13 @@ class Board:
last = [current]
return last
def step(self, move, colour):
def step(self, move: int, colour: int) -> tuple:
"""
Takes a move and executes it
:param move: The move to execute
:param colour: The colour of the player
:return: The reward, the resulting board and whether the game is done
"""
start, end = self._decode(move)
start[0] = start[0] - 1
start[1] = start[1] - 1
@ -264,7 +364,12 @@ class Board:
return reward, self, done
def _decode(self, move):
def _decode(self, move: int) -> tuple:
"""
Decodes the move from an integer into a start and end tuple
:param move: The move to decode
:return: Start and end tuple
"""
# Split digits back out
str_code = str(move)
# print(str_code)
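A minimal standalone sketch of the digit-splitting that _decode performs, assuming four single-digit, 1-based coordinates as produced by _encode (the helper name is hypothetical):

def decodeSketch(move: int) -> tuple:
    # e.g. 1234 -> start [1, 2], end [3, 4]; step() then shifts both to 0-based.
    s = str(move).zfill(4)
    return [int(s[0]), int(s[1])], [int(s[2]), int(s[3])]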

View File

@ -4,7 +4,7 @@ WIDTH, HEIGHT = 800, 800
ROWS, COLS = 8, 8
SQUARE_SIZE = WIDTH // COLS
# RGB color
# RGB colour
GREEN = 1
WHITE = 2

View File

@ -1,29 +1,54 @@
from __future__ import annotations
import pygame
from utilities.Board import Board
from utilities.constants import GREEN, WHITE, BLUE, SQUARE_SIZE
class GameManager:
def __init__(self, win, colour):
def __init__(self, win: pygame.Surface, colour: int) -> None:
"""
Constructor for the GameManager class
:param win: The window
:param colour: The colour of the player
"""
self._init(colour)
self.win = win
def _init(self, colour):
def _init(self, colour: int) -> None:
"""
Initialises the game
:param colour: the colour of the player
"""
self.selected = None
self.board = Board()
self.turn = colour
self.validMoves = {}
self.legCount = 0
def update(self):
def update(self) -> None:
"""
Updates the GUI
:return: None
"""
self.board.draw(self.win)
self.drawValidMoves(self.validMoves)
pygame.display.update()
def reset(self):
def reset(self) -> None:
"""
Resets the game
:return: None
"""
self._init(self.turn)
def select(self, row, col):
def select(self, row: int, col: int) -> bool:
"""
Selects a piece
:param row: Row of the piece
:param col: Column of the piece
:return: True if the selection was successful, False otherwise
"""
if self.selected:
result = self._move(row, col)
if not result:
@ -35,7 +60,13 @@ class GameManager:
self.validMoves = self.board.getValidMoves(piece)
return True
def _move(self, row, col):
def _move(self, row: int, col: int) -> bool:
"""
Moves a piece
:param row: Row of the piece
:param col: Column of the piece
:return: True if the move was successful, False otherwise
"""
piece = self.board.getPiece(row, col)
if self.selected and piece == 0 and (row, col) in self.validMoves:
self.board.move(self.selected, row, col)
@ -62,18 +93,36 @@ class GameManager:
return
self.turn = GREEN
def drawValidMoves(self, moves):
def drawValidMoves(self, moves: dict) -> None:
"""
Draws the valid moves
:param moves: Dictionary of valid moves
:return: None
"""
for row, col in moves:
pygame.draw.circle(self.win, BLUE,
(col * SQUARE_SIZE + SQUARE_SIZE // 2, row * SQUARE_SIZE + SQUARE_SIZE // 2), 15)
def winner(self):
def winner(self) -> int | None:
"""
Gets the winner
:return: The winner
"""
return self.board.winner()
def getBoard(self):
def getBoard(self) -> Board:
"""
Gets the board
:return: The board
"""
return self.board
def aiMove(self, board):
def aiMove(self, board: Board) -> None:
"""
Makes a move for the AI
:param board: The new board
:return: None
"""
if board is None:
# colour = "green" if self.turn == GREEN else "white"
# print("no move left for " + colour + " to make")

View File

@ -4,7 +4,13 @@ from utilities.constants import SQUARE_SIZE, GREY, CROWN, GREEN
class Piece:
def __init__(self, row, col, colour):
def __init__(self, row: int, col: int, colour: int) -> None:
"""
Constructor for the Piece class, which represents a piece on the board
:param row: Row of the piece
:param col: Column of the piece
:param colour: Colour of the piece
"""
self.row = row
self.col = col
self.colour = colour
@ -17,24 +23,47 @@ class Piece:
self.green = (144, 184, 59)
self.white = (255, 255, 255)
def calcPosition(self):
def calcPosition(self) -> None:
"""
Calculates the position of the piece
:return: None
"""
self.x = SQUARE_SIZE * self.col + SQUARE_SIZE // 2
self.y = SQUARE_SIZE * self.row + SQUARE_SIZE // 2
def makeKing(self):
def makeKing(self) -> None:
"""
Makes the piece a king
:return: None
"""
self.king = True
def draw(self, win):
def draw(self, win: pygame.Surface) -> None:
"""
Draws the piece
:param win: The window to draw the piece on
:return: None
"""
radius = SQUARE_SIZE // 2 - self.padding
pygame.draw.circle(win, GREY, (self.x, self.y), radius + self.border)
pygame.draw.circle(win, self.green if self.colour == GREEN else self.white, (self.x, self.y), radius)
if self.king:
win.blit(CROWN, (self.x - CROWN.get_width() // 2, self.y - CROWN.get_height() // 2))
def move(self, row, col):
def move(self, row: int, col: int) -> None:
"""
Moves the piece to a new position
:param row: Row to move to
:param col: Column to move to
:return: None
"""
self.row = row
self.col = col
self.calcPosition()
def __repr__(self):
def __repr__(self) -> str:
"""
String representation of the piece
:return: String representation of the colour
"""
return str(self.colour)

100
winners-3.txt Normal file
View File

@ -0,0 +1,100 @@
2
2
2
2
0
2
2
2
2
2
2
0
0
2
1
2
2
2
2
2
2
2
2
2
2
0
2
2
2
2
0
2
2
2
2
2
2
2
2
2
2
2
2
1
0
2
2
2
2
2
2
2
2
2
1
2
1
0
2
0
0
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
1
2
2
2
0
2
2
2
2
2
2
2
2
2
2
2
2

100
winners-5.txt Normal file
View File

@ -0,0 +1,100 @@
2
2
1
2
1
1
2
2
2
2
2
2
2
2
2
2
0
2
2
2
1
2
2
0
2
2
0
2
2
0
0
2
2
2
2
2
0
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
2
0
2
2
2
0
2
0
2
2
2
2
1
0
2
2
2
2
2
2
1
2
2
2
2
0
2
0
2
2
2
2
2
1
2
2
1
2
2
2
2
2
2
2
2