Last change on this file since revision 84 was revision 67, checked in by Aron Hammond, 7 years ago:
"Added support for agents that learn via Reinforcement Learning, including an implementation of an agent that uses tabular Q-learning."
File size: 990 bytes
Line | |
---|
1 | package uva.projectai.y2018.jasparon;
|
---|
2 |
|
---|
3 | import java.util.Comparator;
|
---|
4 |
|
---|
5 | import genius.core.bidding.BidDetails;
|
---|
6 | import genius.core.boaframework.OpponentModel;
|
---|
7 |
|
---|
8 | public class JasparonBidComparator implements Comparator<BidDetails> {
|
---|
9 |
|
---|
10 | private OpponentModel opponentModel;
|
---|
11 |
|
---|
12 | @Override
|
---|
13 | public int compare(BidDetails arg0, BidDetails arg1) {
|
---|
14 | return getMeasure(arg0) > getMeasure(arg1) ? -1 : 1;
|
---|
15 | }
|
---|
16 |
|
---|
17 | public JasparonBidComparator(OpponentModel opponentModel) {
|
---|
18 | this.opponentModel = opponentModel;
|
---|
19 | }
|
---|
20 |
|
---|
21 | /*
|
---|
22 | * returns a double that represents the value of a value of a bid, taking into account both the agents
|
---|
23 | * own and opponents' utility.
|
---|
24 | *
|
---|
25 | * Adapted from 2010 AgentSmith ANAC entry
|
---|
26 | */
|
---|
27 | public double getMeasure(BidDetails b1) {
|
---|
28 | double a = (1 - b1.getMyUndiscountedUtil());
|
---|
29 | double b = (1 - this.opponentModel.getBidEvaluation(b1.getBid()));
|
---|
30 |
|
---|
31 | double alpha = Math.atan(b/a);
|
---|
32 |
|
---|
33 | return a + b + (0.5*Math.PI / alpha) * 0.5*Math.PI;
|
---|
34 | }
|
---|
35 |
|
---|
36 | }
|
---|
Note: See TracBrowser for help on using the repository browser.