Implemented an agent that chooses to play winning, losing, or random moves by solving a simplified MDP model of the game using policy iteration.

Portions of the MDP/solver code are by Ciaran O'Reilly and Ravi Mohan, used under the MIT license.
This commit is contained in:
Woody Folsom
2012-04-30 13:35:40 -04:00
parent c06f7ab38e
commit d0ee1e647b
35 changed files with 2500 additions and 3 deletions

View File

@@ -1,5 +1,6 @@
package view;
import model.comPlayer.AdaptiveComPlayer;
import model.comPlayer.AlphaBetaComPlayer;
import model.comPlayer.MinimaxComPlayer;
import model.comPlayer.MonteCarloComPlayer;
@@ -7,16 +8,19 @@ import model.comPlayer.Player;
import model.comPlayer.RandomComPlayer;
public class ParsedArgs {
public static final String COM_RANDOM = "RANDOM";
public static final String COM_MINIMAX = "MINIMAX";
public static final String COM_ADAPTIVE = "ADAPTIVE";
public static final String COM_ALPHABETA = "ALPHABETA";
public static final String COM_MINIMAX = "MINIMAX";
public static final String COM_MONTECARLO = "MONTECARLO";
public static final String COM_RANDOM = "RANDOM";
public static final String COM_DEFAULT = COM_ALPHABETA;
private String comPlayer = COM_DEFAULT;
public Player getComPlayer() {
if (COM_RANDOM.equalsIgnoreCase(comPlayer)) {
if (COM_ADAPTIVE.equalsIgnoreCase(comPlayer)) {
return new AdaptiveComPlayer();
} else if (COM_RANDOM.equalsIgnoreCase(comPlayer)) {
return new RandomComPlayer();
} else if (COM_MINIMAX.equalsIgnoreCase(comPlayer)) {
return new MinimaxComPlayer();