-module(ai_core).

-export([init_ai/1, make_decision/2, update_strategy/3]).

%% Internal AI engine state.
-record(ai_state, {
    personality,          % aggressive | conservative | balanced
    strategy_weights,     % map of scoring weights used when rating candidate plays
    knowledge_base,       % accumulated play-pattern statistics
    game_history = []     % per-game history terms, most recent first
}).

%% Build a fresh AI state for the given personality.
init_ai(Personality) ->
    #ai_state{
        personality = Personality,
        strategy_weights = init_weights(Personality),
        knowledge_base = init_knowledge_base()
    }.

%% Choose a play for the current game state: summarise the situation,
%% enumerate the candidate plays, score each one, then let the
%% personality-specific selector pick the final play.
make_decision(AIState, GameState) ->
    Situation = analyze_situation(GameState),
    Candidates = generate_possible_plays(GameState),
    Rated = evaluate_plays(Candidates, AIState, Situation),
    select_best_play(Rated, AIState).

%% Fold the outcome of a finished game back into the AI state:
%% re-tune the strategy weights, extend the knowledge base and
%% prepend the game's history to the history log.
update_strategy(AIState, GameResult, GameHistory) ->
    #ai_state{strategy_weights = Weights,
              knowledge_base = Knowledge,
              game_history = History} = AIState,
    AIState#ai_state{
        strategy_weights = adjust_weights(Weights, GameResult),
        knowledge_base = update_knowledge(Knowledge, GameHistory),
        game_history = [GameHistory | History]
    }.

%% Initial scoring weights for each personality profile.
init_weights(aggressive) ->
    #{control_weight => 0.8, attack_weight => 0.7,
      defense_weight => 0.3, risk_weight => 0.6};
init_weights(conservative) ->
    #{control_weight => 0.5, attack_weight => 0.4,
      defense_weight => 0.8, risk_weight => 0.3};
init_weights(balanced) ->
    #{control_weight => 0.6, attack_weight => 0.6,
      defense_weight => 0.6, risk_weight => 0.5}.

%% Summarise the game state along the axes the scorers care about.
analyze_situation(GameState) ->
    #{
        hand_strength => evaluate_hand_strength(GameState),
        control_status => evaluate_control(GameState),
        opponent_cards => estimate_opponent_cards(GameState),
        game_stage => determine_game_stage(GameState)
    }.

%% All legal plays given our hand and the last play on the table.
generate_possible_plays(GameState) ->
    generate_valid_plays(get_my_cards(GameState), get_last_play(GameState)).

%% Attach a score to every candidate play, yielding [{Play, Score}].
evaluate_plays(Plays, AIState, Situation) ->
    [{Play, calculate_play_score(Play, AIState, Situation)} || Play <- Plays].
%% Weighted sum of the play's value along each strategic axis; the
%% weights come from the AI's personality (see init_weights/1).
calculate_play_score(Play, AIState, Situation) ->
    Weights = AIState#ai_state.strategy_weights,
    ControlScore = evaluate_control_value(Play, Situation) * maps:get(control_weight, Weights),
    AttackScore = evaluate_attack_value(Play, Situation) * maps:get(attack_weight, Weights),
    DefenseScore = evaluate_defense_value(Play, Situation) * maps:get(defense_weight, Weights),
    RiskScore = evaluate_risk_value(Play, Situation) * maps:get(risk_weight, Weights),
    ControlScore + AttackScore + DefenseScore + RiskScore.

%% Dispatch to the personality-specific selection strategy.
select_best_play(RatedPlays, AIState) ->
    case AIState#ai_state.personality of
        aggressive -> select_aggressive(RatedPlays);
        conservative -> select_conservative(RatedPlays);
        balanced -> select_balanced(RatedPlays)
    end.

%% Aggressive: pick the play with the highest score.
%% BUG FIX: the original used lists:max/1 on {Play, Score} tuples, which
%% compares on Play (the FIRST tuple element) under Erlang term order —
%% not on Score — so it did not return the highest-scoring play. We now
%% fold over the list keeping the best score seen so far. Still crashes
%% on an empty list, as the original did.
select_aggressive([First | Rest]) ->
    {Play, _Score} =
        lists:foldl(
            fun({_, Score} = Rated, {_, BestScore}) when Score > BestScore ->
                    Rated;
               (_Rated, Best) ->
                    Best
            end,
            First, Rest),
    Play.

%% Conservative: prefer low-risk plays; fall back to the balanced
%% strategy when no play is considered safe.
select_conservative(RatedPlays) ->
    case filter_safe_plays(RatedPlays) of
        [] -> select_balanced(RatedPlays);
        SafePlays -> select_from_safe_plays(SafePlays)
    end.

%% Balanced: trade score off against risk (delegated helper).
select_balanced(RatedPlays) ->
    {Play, _Score} = select_balanced_play(RatedPlays),
    Play.

%% Rough strength of our current hand.
evaluate_hand_strength(GameState) ->
    calculate_hand_value(get_my_cards(GameState)).

%% Whether we can keep the initiative given the last play on the table.
evaluate_control(GameState) ->
    can_control_game(get_my_cards(GameState), get_last_play(GameState)).

%% Estimate the opponents' remaining cards from the cards already
%% played and our own hand.
estimate_opponent_cards(GameState) ->
    estimate_remaining_cards(get_played_cards(GameState), get_my_cards(GameState)).

%% Merge the play patterns extracted from a finished game into the
%% existing knowledge base.
update_knowledge(KnowledgeBase, GameHistory) ->
    merge_knowledge(KnowledgeBase, extract_patterns(GameHistory)).

%% Accumulate per-pattern statistics over the plays of one game.
extract_patterns(GameHistory) ->
    lists:foldl(
        fun(Play, Patterns) ->
            update_pattern_stats(analyze_play_pattern(Play), Patterns)
        end,
        #{}, GameHistory).
%% Merge new pattern statistics into the existing knowledge base.
%% Keys present in both maps are combined via update_knowledge_value/2
%% (old value first, new value second); keys only in New are inserted
%% as-is. Equivalent to maps:merge_with/3.
merge_knowledge(OldKnowledge, NewPatterns) ->
    maps:fold(
        fun(Key, NewValue, Acc) ->
            case maps:find(Key, Acc) of
                {ok, Existing} ->
                    Acc#{Key := update_knowledge_value(Existing, NewValue)};
                error ->
                    Acc#{Key => NewValue}
            end
        end,
        OldKnowledge,
        NewPatterns).