- Reorganized the constructors in Move.java.
- Made the getBoardState method in Referee.java static.
- Created NeuralNetworkPlayer.java, though the computer does not yet use it.
- Updated the Player interface so a PlayerModel is passed to getMove; most of the current computer agents ignore the data, but it is now available to them.
- Updated the train function in PlayerModel.java.
This commit is contained in:
@@ -20,12 +20,32 @@ public class Referee implements Runnable {
|
||||
|
||||
public static final String PLAYER_TURN = "Waiting for the player's move.";
|
||||
|
||||
public static boolean[] getBoardState(Board brd) {
|
||||
boolean[] boardState = new boolean[(Board.NUM_COLS * Board.NUM_ROWS * (Board.TileColor
|
||||
.values().length - 1))];
|
||||
|
||||
int i = 0;
|
||||
for (int r = 0; r < Board.NUM_ROWS; r++) {
|
||||
for (int c = 0; c < Board.NUM_COLS; c++) {
|
||||
boardState[i] = (brd.getTile(r, c) == TileColor.BLUE);
|
||||
boardState[i + 1] = (brd.getTile(r, c) == TileColor.GREEN);
|
||||
boardState[i + 2] = (brd.getTile(r, c) == TileColor.RED);
|
||||
boardState[i + 3] = (brd.getTile(r, c) == TileColor.YELLOW);
|
||||
|
||||
i += 4;
|
||||
}
|
||||
}
|
||||
|
||||
return boardState;
|
||||
}
|
||||
|
||||
private Board board;
|
||||
private BoardPanel boardPanel;
|
||||
private final Player computerPlayer;
|
||||
|
||||
private final Player computerPlayer;
|
||||
private final HumanPlayer humanPlayer = new HumanPlayer();
|
||||
private final MainFrame mf;
|
||||
|
||||
private PlayerModel playerModel = null;
|
||||
|
||||
public Referee(MainFrame mnFrm, String player, Player computerPlayer) {
|
||||
@@ -43,24 +63,6 @@ public class Referee implements Runnable {
|
||||
initGame();
|
||||
}
|
||||
|
||||
public boolean[] getBoardState() {
|
||||
boolean[] boardState = new boolean[getPlayerModel().getNumInputNodes()];
|
||||
|
||||
int i = 0;
|
||||
for (int r = 0; r < Board.NUM_ROWS; r++) {
|
||||
for (int c = 0; c < Board.NUM_COLS; c++) {
|
||||
boardState[i] = (board.getTile(r, c) == TileColor.BLUE);
|
||||
boardState[i + 1] = (board.getTile(r, c) == TileColor.GREEN);
|
||||
boardState[i + 2] = (board.getTile(r, c) == TileColor.RED);
|
||||
boardState[i + 3] = (board.getTile(r, c) == TileColor.YELLOW);
|
||||
|
||||
i += 4;
|
||||
}
|
||||
}
|
||||
|
||||
return boardState;
|
||||
}
|
||||
|
||||
public Player getComputerPlayer() {
|
||||
return computerPlayer;
|
||||
}
|
||||
@@ -162,26 +164,30 @@ public class Referee implements Runnable {
|
||||
System.out
|
||||
.println("Interrupted while waiting for human to move!");
|
||||
} else {
|
||||
Move mv = humanPlayer.getMove(board);
|
||||
Move mv = humanPlayer.getMove(board, playerModel);
|
||||
if (board.getTile(mv.getCell().r, mv.getCell().c) == TileColor.NONE) {
|
||||
playToken(humanPlayer.getMove(board));
|
||||
playToken(humanPlayer.getMove(board, playerModel));
|
||||
|
||||
getPlayerModel().train(getMoveArray(mv));
|
||||
getPlayerModel().train(getBoardState(board),
|
||||
getMoveArray(mv));
|
||||
|
||||
} else {
|
||||
humanPlayer.denyMove();
|
||||
}
|
||||
}
|
||||
} else {
|
||||
Move mv = computerPlayer.getMove(board);
|
||||
Move mv = computerPlayer.getMove(board, getPlayerModel());
|
||||
playToken(mv);
|
||||
|
||||
// TODO
|
||||
// This is the call that gets a prediction of a user's move.
|
||||
// Some changes will probably be necessary to put it in the
|
||||
// right place and also to get the node weights. But... all in
|
||||
// due time.
|
||||
getPlayerModel().getOutputNodes(getBoardState());
|
||||
// UPDATE: I made a neural network agent. This call is in there
|
||||
// now. If the current agent doesn't use the neural network,
|
||||
// then this doesn't need to be called. It will just train on
|
||||
// random, meaningless data.
|
||||
// getPlayerModel().getOutputNodes(getBoardState(board));
|
||||
}
|
||||
|
||||
mf.updateMessage(getMessage());
|
||||
|
||||
Reference in New Issue
Block a user