Search#

class pantograph.search.Agent#

An agent interface for proof search

abstract guidance(state: GoalState) list[float]#

Return a list of priorities determining which goal should be searched first. This will not be called on states with one or zero goals.

abstract next_tactic(state: GoalState, goal_id: int) str | TacticHave | TacticLet | TacticCalc | TacticExpr | None#

Implement this function to generate the next tactic for a goal

abstract reset()#

Called after a search completes, to reset the agent's internal state.

search(server: Server, goal_state: GoalState, max_steps: int = 100, max_trials_per_goal: int = 5, verbose: bool = False) SearchResult#

Executes proof search on this state

class pantograph.search.DumbAgent#
next_tactic(state: GoalState, goal_id: int) str | TacticHave | TacticLet | TacticCalc | TacticExpr | None#

Implement this function to generate the next tactic for a goal

class pantograph.search.DumbMCTSAgent#
estimate(state: SearchState) SearchState#

Implement this function to estimate the value of a state

next_tactic(state: GoalState, goal_id: int, tested: List[str | TacticHave | TacticLet | TacticCalc | TacticExpr] | None = None) str | TacticHave | TacticLet | TacticCalc | TacticExpr | None#

Implement this function to generate the next tactic for a goal given tactics already tested

select(state: SearchState) list[SearchState]#

UCB scoring that includes the current state itself as one option, i.e. as one child.

class pantograph.search.MCTSAgent#

An agent interface for proof search using Monte Carlo tree search

backup(states: list[SearchState], value: float)#

Back up the value of the state at the end of the states list.

abstract estimate(state: SearchState) SearchState#

Implement this function to estimate the value of a state

abstract next_tactic(state: GoalState, goal_id: int, tested: List[str | TacticHave | TacticLet | TacticCalc | TacticExpr] | None = None) str | TacticHave | TacticLet | TacticCalc | TacticExpr | None#

Implement this function to generate the next tactic for a goal given tactics already tested

abstract reset()#

Called after a search completes, to reset the agent's internal state.

search(server: Server, goal_state: GoalState, max_steps: int = 100, max_trials_per_goal: int = 5, verbose: bool = False) SearchResult#

Executes proof search on this state

abstract select(state: SearchState) list[SearchState]#

Implement this function to select the best node within the subtree of the state. Returns the path to the selected node from the given state.

class pantograph.search.SearchResult(n_goals_root: int, duration: float, success: bool, steps: int)#
class pantograph.search.SearchState(goal_state: pantograph.expr.GoalState, parent: Self | None, parent_goal_id: int | None, priorities: list[float], children: List[Self] | None = None, tested_tactics: List[str | pantograph.expr.TacticHave | pantograph.expr.TacticLet | pantograph.expr.TacticCalc | pantograph.expr.TacticExpr] | None = None, total_value: float | None = None, tactic_feedback: str | None = None)#
class pantograph.search.TestMCTSSearch(methodName='runTest')#
class pantograph.search.TestSearch(methodName='runTest')#