Fixed unit tests; changed MDP generation so that it seeks the goal state more sensibly, avoiding a premature end of game.

Removed unused google-code classes.
Regenerate policy when AdaptiveComPlayer.setTarget() is called.
This commit is contained in:
Woody Folsom
2012-04-30 17:37:37 -04:00
parent 3800436cd9
commit 8f92ae65d8
19 changed files with 53 additions and 939 deletions

View File

@@ -1,15 +1,8 @@
package aima.core.probability.mdp;
import java.util.Map;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import aima.core.environment.cellworld.Cell;
import aima.core.environment.cellworld.CellWorld;
import aima.core.environment.cellworld.CellWorldAction;
import aima.core.environment.cellworld.CellWorldFactory;
import aima.core.environment.gridworld.GridCell;
import aima.core.environment.gridworld.GridWorld;
import aima.core.environment.gridworld.GridWorldAction;
@@ -18,7 +11,6 @@ import aima.core.probability.example.MDPFactory;
import aima.core.probability.mdp.MarkovDecisionProcess;
import aima.core.probability.mdp.impl.ModifiedPolicyEvaluation;
import aima.core.probability.mdp.search.PolicyIteration;
import aima.core.probability.mdp.search.ValueIteration;
/**
* @author Ravi Mohan
@@ -29,28 +21,31 @@ public class PolicyIterationTest {
public static final double DELTA_THRESHOLD = 1e-3;
private GridWorld<Double> gw = null;
private MarkovDecisionProcess<GridCell<Double>, GridWorldAction> mdp = null;
private MarkovDecisionProcess<GridCell<Double>, GridWorldAction> mdp = null;
private PolicyIteration<GridCell<Double>, GridWorldAction> pi = null;
final int maxTiles = 6;
final int maxScore = 10;
@Before
public void setUp() {
//take 10 turns to place 6 tiles
// take 10 turns to place 6 tiles
double defaultPenalty = -0.04;
gw = GridWorldFactory.createGridWorldForTileGame(maxTiles,maxScore,defaultPenalty);
gw = GridWorldFactory.createGridWorldForTileGame(maxTiles, maxScore,
defaultPenalty);
mdp = MDPFactory.createMDPForTileGame(gw, maxTiles, maxScore);
//gamma = 1.0
PolicyEvaluation<GridCell<Double>,GridWorldAction> pe = new ModifiedPolicyEvaluation<GridCell<Double>, GridWorldAction>(100,0.9);
// gamma = 1.0
PolicyEvaluation<GridCell<Double>, GridWorldAction> pe = new ModifiedPolicyEvaluation<GridCell<Double>, GridWorldAction>(
100, 0.9);
pi = new PolicyIteration<GridCell<Double>, GridWorldAction>(pe);
}
@Test
public void testPolicyIterationForTileGame() {
Policy<GridCell<Double>, GridWorldAction> policy = pi.policyIteration(mdp);
Policy<GridCell<Double>, GridWorldAction> policy = pi
.policyIteration(mdp);
for (int j = maxScore; j >= 1; j--) {
StringBuilder sb = new StringBuilder();
@@ -60,21 +55,5 @@ public class PolicyIterationTest {
}
System.out.println(sb.toString());
}
//Assert.assertEquals(0.705, U.get(gw.getCellAt(1, 1)), DELTA_THRESHOLD);
/*
Assert.assertEquals(0.762, U.get(cw1.getCellAt(1, 2)), DELTA_THRESHOLD);
Assert.assertEquals(0.812, U.get(cw1.getCellAt(1, 3)), DELTA_THRESHOLD);
Assert.assertEquals(0.655, U.get(cw1.getCellAt(2, 1)), DELTA_THRESHOLD);
Assert.assertEquals(0.868, U.get(cw1.getCellAt(2, 3)), DELTA_THRESHOLD);
Assert.assertEquals(0.611, U.get(cw1.getCellAt(3, 1)), DELTA_THRESHOLD);
Assert.assertEquals(0.660, U.get(cw1.getCellAt(3, 2)), DELTA_THRESHOLD);
Assert.assertEquals(0.918, U.get(cw1.getCellAt(3, 3)), DELTA_THRESHOLD);
Assert.assertEquals(0.388, U.get(cw1.getCellAt(4, 1)), DELTA_THRESHOLD);
Assert.assertEquals(-1.0, U.get(cw1.getCellAt(4, 2)), DELTA_THRESHOLD);
Assert.assertEquals(1.0, U.get(cw1.getCellAt(4, 3)), DELTA_THRESHOLD);*/
}
}