Commit

autopep8 -r . -i --max-line-length 1000 --ignore E309
thouis committed Sep 22, 2016
1 parent 8cd09fe commit 2f089be
Showing 7 changed files with 8 additions and 4 deletions.
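
The flags, per autopep8's documented options: -r recurses through the tree, -i rewrites files in place, and --max-line-length 1000 effectively disables line-wrapping fixes, so only whitespace-level issues are touched. --ignore E309 skips autopep8's own extension fix "add missing blank line (after class declaration)", which is why the blank lines added below land after each class docstring (the E301 fix) rather than directly under the class line.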
2 changes: 1 addition & 1 deletion AlphaGo/ai.py
@@ -87,7 +87,7 @@ def get_moves(self, states):
 class MCTSPlayer(object):
     def __init__(self, value_function, policy_function, rollout_function, lmbda=.5, c_puct=5, rollout_limit=500, playout_depth=40, n_playout=100):
         self.mcts = mcts.MCTS(value_function, policy_function, rollout_function, lmbda, c_puct,
-            rollout_limit, playout_depth, n_playout)
+                              rollout_limit, playout_depth, n_playout)

     def get_move(self, state):
         sensible_moves = [move for move in state.get_legal_moves(include_eyes=False)]
2 changes: 1 addition & 1 deletion AlphaGo/go.py
@@ -427,7 +427,7 @@ def do_move(self, action, color=None):
         # Check for end of game
         if len(self.history) > 1:
             if self.history[-1] is PASS_MOVE and self.history[-2] is PASS_MOVE \
-                and self.current_player == WHITE:
+                    and self.current_player == WHITE:
                 self.is_end_of_game = True
         return self.is_end_of_game

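
For context, the condition being re-indented is the end-of-game rule: two consecutive passes finish the game. A stand-alone sketch of the same check, with PASS_MOVE and WHITE bound to the literal values the interface/Play.py hunk below compares against (None and -1); an illustration of the rule, not this module's API:

    PASS_MOVE = None  # assumed from interface/Play.py, which tests history entries with "is None"
    WHITE = -1        # assumed from interface/Play.py, which tests current_player == -1

    def is_two_pass_end(history, current_player):
        # the game ends once both players have passed in a row; the
        # current_player check mirrors the extra condition in do_move above
        return (len(history) > 1
                and history[-1] is PASS_MOVE
                and history[-2] is PASS_MOVE
                and current_player == WHITE)
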
1 change: 1 addition & 0 deletions AlphaGo/mcts.py
@@ -11,6 +11,7 @@ class TreeNode(object):
"""A node in the MCTS tree. Each node keeps track of its own value Q, prior probability P, and
its visit-count-adjusted prior score u.
"""

def __init__(self, parent, prior_p):
self._parent = parent
self._children = {} # a map from action to TreeNode
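
The Q, P, and u in this docstring are the quantities in the PUCT selection rule from the AlphaGo paper that this module implements: a child is chosen to maximize Q plus a prior term u that decays with visits. A sketch of that score; the function name and argument layout are assumptions, not read from this diff:

    import math

    def puct_score(Q, P, n_visits, parent_visits, c_puct=5):
        # u is the visit-count-adjusted prior score: proportional to the
        # prior P at first, shrinking as the node accumulates visits
        u = c_puct * P * math.sqrt(parent_visits) / (1 + n_visits)
        return Q + u

The c_puct=5 default matches MCTSPlayer's default in the AlphaGo/ai.py hunk above.
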
1 change: 1 addition & 0 deletions AlphaGo/models/nn_util.py
@@ -118,6 +118,7 @@ class Bias(Layer):
     Largely copied from the keras docs:
     http://keras.io/layers/writing-your-own-keras-layers/#writing-your-own-keras-layers
     """
+
     def __init__(self, **kwargs):
         super(Bias, self).__init__(**kwargs)

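
The linked keras page gives the custom-layer recipe this class follows: create weights in build() and apply them in call(). A minimal sketch of a bias layer written to that recipe against the Keras 1.x API of the time; the weight name and zero initialization are assumptions, not read from this diff:

    from keras import backend as K
    from keras.engine.topology import Layer

    class BiasSketch(Layer):
        """Adds a trainable offset to every position of its input."""

        def __init__(self, **kwargs):
            super(BiasSketch, self).__init__(**kwargs)

        def build(self, input_shape):
            # one parameter per input position (every dim except batch)
            self.b = K.zeros(input_shape[1:])
            self.trainable_weights = [self.b]

        def call(self, x, mask=None):
            return x + self.b
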
2 changes: 1 addition & 1 deletion AlphaGo/preprocessing/preprocessing.py
@@ -2,7 +2,7 @@
 import AlphaGo.go as go

 ##
-## individual feature functions (state --> tensor) begin here
+# individual feature functions (state --> tensor) begin here
 ##


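
The reformatted comment marks the convention used through the rest of this file: each feature function maps a game state to a stack of binary planes. A hypothetical function in that state --> tensor shape; the GameState attribute names here are assumptions, not taken from this diff:

    import numpy as np

    def get_stones(state):
        # two binary planes shaped (2, size, size): own stones, opponent stones;
        # colors are +1/-1, matching the -1 used for white in interface/Play.py
        planes = np.zeros((2, state.size, state.size))
        planes[0] = (state.board == state.current_player)
        planes[1] = (state.board == -state.current_player)
        return planes
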
1 change: 1 addition & 0 deletions AlphaGo/training/reinforcement_policy_trainer.py
@@ -24,6 +24,7 @@ class BatchedReinforcementLearningSGD(Optimizer):
     lr: float >= 0. Learning rate.
     ng: int > 0. Number of games played in parallel. Each one has its own cumulative gradient.
     '''
+
     def __init__(self, lr=0.01, ng=20, **kwargs):
         super(BatchedReinforcementLearningSGD, self).__init__(**kwargs)
         self.__dict__.update(locals())
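
Given the constructor shown, wiring the optimizer into a Keras policy model would look roughly like this; the toy model is a placeholder, and only lr, ng, and the import path come from this commit:

    from keras.models import Sequential
    from keras.layers import Dense
    from AlphaGo.training.reinforcement_policy_trainer import BatchedReinforcementLearningSGD

    # stand-in for the policy network: 361 = 19x19 move probabilities
    model = Sequential([Dense(361, activation='softmax', input_dim=361)])
    model.compile(loss='categorical_crossentropy',
                  optimizer=BatchedReinforcementLearningSGD(lr=0.01, ng=20))
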
3 changes: 2 additions & 1 deletion interface/Play.py
@@ -4,6 +4,7 @@

 class play_match(object):
     """Interface to handle play between two players."""
+
     def __init__(self, player1, player2, save_dir=None, size=19):
         # super(ClassName, self).__init__()
         self.player1 = player1
@@ -19,7 +20,7 @@ def _play(self, player):
         # self.state.write_to_disk()
         if len(self.state.history) > 1:
             if self.state.history[-1] is None and self.state.history[-2] is None \
-                and self.state.current_player == -1:
+                    and self.state.current_player == -1:
                 end_of_game = True
             else:
                 end_of_game = False
