# Autonomous Agent Gaming
Build autonomous game-playing agents using AI and reinforcement learning. Covers game environments, agent decision-making, strategy development, and performance optimization. Use when creating game-playing bots, testing game AI, strategic decision-making systems, or game theory applications.
npx skill4agent add qodex-ai/ai-agent-skills autonomous-agent-gaming

# Rule-based agent
python examples/rule_based_agent.py
# Minimax with alpha-beta pruning
python examples/minimax_agent.py
# Monte Carlo Tree Search
python examples/mcts_agent.py
# Q-Learning agent
python examples/qlearning_agent.py
# Chess engine
python examples/chess_engine.py
# Game theory analysis
python scripts/game_theory_analyzer.py
# Benchmark agents
python scripts/agent_benchmark.py

### examples/rule_based_agent.py

from examples.rule_based_agent import RuleBasedGameAgent
agent = RuleBasedGameAgent(difficulty="hard")
best_move = agent.decide_action(game_state)

### examples/minimax_agent.py

from examples.minimax_agent import MinimaxGameAgent
agent = MinimaxGameAgent(max_depth=6)
best_move = agent.get_best_move(game_state)

### examples/mcts_agent.py

from examples.mcts_agent import MCTSAgent
agent = MCTSAgent(iterations=1000, exploration_constant=1.414)
best_move = agent.get_best_move(game_state)

### examples/qlearning_agent.py

from examples.qlearning_agent import QLearningAgent
agent = QLearningAgent(learning_rate=0.1, discount_factor=0.99, epsilon=0.1)
action = agent.get_action(state)
agent.update_q_value(state, action, reward, next_state)
agent.decay_epsilon()  # Reduce exploration over time

### examples/game_environment.py

Environment interface methods: `reset()`, `step(action)`, `get_legal_actions(state)`, `is_terminal(state)`, `render()`

import gym
# Create environment
env = gym.make('CartPole-v1')
# Initialize
state = env.reset()
# Run episode
done = False
while not done:
action = agent.get_action(state)
next_state, reward, done, info = env.step(action)
agent.update(state, action, reward, next_state)
state = next_state
env.close()

### examples/chess_engine.py

pip install python-chess

from examples.chess_engine import ChessAgent
agent = ChessAgent()
result, moves = agent.play_game()
print(f"Game result: {result} in {moves} moves")

### examples/game_environment.py

from examples.game_environment import PygameGameEnvironment
class MyGame(PygameGameEnvironment):
def get_initial_state(self):
# Return initial game state
pass
def apply_action(self, state, action):
# Execute action, return new state
pass
def calculate_reward(self, state, action, next_state):
# Return reward value
pass
def is_terminal(self, state):
# Check if game is over
pass
def draw_state(self, state):
# Render using pygame
pass
game = MyGame()
game.render()

### examples/strategy_modules.py

from examples.strategy_modules import OpeningBook
book = OpeningBook()
if book.in_opening(game_state):
move = book.get_opening_move(game_state)

from examples.strategy_modules import EndgameTablebase
tablebase = EndgameTablebase()
if tablebase.in_tablebase(game_state):
move = tablebase.get_best_endgame_move(game_state)
dtm = tablebase.get_endgame_distance(game_state)

### AdaptiveGameAgent

from examples.strategy_modules import AdaptiveGameAgent
from examples.minimax_agent import MinimaxGameAgent
agent = AdaptiveGameAgent(
opening_book=book,
middlegame_engine=MinimaxGameAgent(max_depth=6),
endgame_tablebase=tablebase
)
move = agent.decide_action(game_state)
phase_info = agent.get_phase_info(game_state)

### CompositeStrategy

from examples.strategy_modules import CompositeStrategy
composite = CompositeStrategy([
opening_strategy,
endgame_strategy,
default_search_strategy
])
move = composite.get_move(game_state)
active = composite.get_active_strategy(game_state)

### scripts/performance_optimizer.py

from scripts.performance_optimizer import TranspositionTable
tt = TranspositionTable(max_size=1000000)
# Store evaluation
tt.store(position_hash, depth=6, score=150, flag='exact')
# Lookup
score = tt.lookup(position_hash, depth=6)
hit_rate = tt.hit_rate()

from scripts.performance_optimizer import KillerHeuristic
killers = KillerHeuristic(max_depth=20)
# When a cutoff occurs
killers.record_killer(move, depth=5)
# When ordering moves
killer_list = killers.get_killers(depth=5)
is_killer = killers.is_killer(move, depth=5)

from scripts.performance_optimizer import ParallelSearchCoordinator
coordinator = ParallelSearchCoordinator(num_threads=4)
# Parallel move evaluation
scores = coordinator.parallel_evaluate_moves(moves, evaluate_func)
# Parallel minimax
best_move, score = coordinator.parallel_minimax(root_moves, minimax_func)
coordinator.shutdown()

### SearchStatistics

from scripts.performance_optimizer import SearchStatistics
stats = SearchStatistics()
# During search
stats.record_node()
stats.record_cutoff()
stats.record_cache_hit()
# Analysis
print(stats.summary())
print(f"Pruning efficiency: {stats.pruning_efficiency():.1f}%")

### scripts/game_theory_analyzer.py

from scripts.game_theory_analyzer import GameTheoryAnalyzer, PayoffMatrix
import numpy as np
# Create payoff matrix
p1_payoffs = np.array([[3, 0], [5, 1]])
p2_payoffs = np.array([[3, 5], [0, 1]])
matrix = PayoffMatrix(
player1_payoffs=p1_payoffs,
player2_payoffs=p2_payoffs,
row_labels=['Strategy A', 'Strategy B'],
column_labels=['Strategy X', 'Strategy Y']
)
analyzer = GameTheoryAnalyzer()
# Find pure Nash equilibria
equilibria = analyzer.find_pure_strategy_nash_equilibria(matrix)
# Find mixed Nash equilibrium (2x2 only)
p1_mixed, p2_mixed = analyzer.calculate_mixed_strategy_2x2(matrix)
# Expected payoff
payoff = analyzer.calculate_expected_payoff(p1_mixed, p2_mixed, matrix, player=1)
# Zero-sum analysis
if matrix.is_zero_sum():
minimax = analyzer.minimax_value(matrix)
maximin = analyzer.maximin_value(matrix)

from scripts.game_theory_analyzer import CooperativeGameAnalyzer
coop = CooperativeGameAnalyzer()
# Define payoff function for coalitions
def payoff_func(coalition):
# Return total value of coalition
return sum(player_values[p] for p in coalition)
players = ['Alice', 'Bob', 'Charlie']
# Calculate Shapley values
shapley = coop.calculate_shapley_value(payoff_func, players)
print(f"Alice's fair share: {shapley['Alice']}")
# Find core allocation
core = coop.calculate_core(payoff_func, players)
is_stable = coop.is_core_allocation(core, payoff_func, players)

### scripts/agent_benchmark.py

from scripts.agent_benchmark import GameAgentBenchmark
benchmark = GameAgentBenchmark()
# Run tournament
results = benchmark.run_tournament(agents, num_games=100)
# Compare two agents
comparison = benchmark.head_to_head_comparison(agent1, agent2, num_games=50)
print(f"Win rate: {comparison['agent1_win_rate']:.1%}")

# Elo ratings
elo_ratings = benchmark.evaluate_elo_rating(agents, num_games=100)
# Glicko-2 ratings
glicko_ratings = benchmark.glicko2_rating(agents, num_games=100)
# Strength relative to baseline
strength = benchmark.rate_agent_strength(agent, baseline_agents, num_games=20)

# Get performance profile
profile = benchmark.performance_profile(agent, test_positions, time_limit=1.0)
print(f"Accuracy: {profile['accuracy']:.1%}")
print(f"Avg move quality: {profile['avg_move_quality']:.2f}")