%% ===== module: advanced_ai_player =====
-module(advanced_ai_player).
-behaviour(gen_server).
%% NOTE(review): handle_info/2, terminate/2 and code_change/3 are exported
%% but not visible in this chunk -- confirm they are defined later in the file.
-export([start_link/2, init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]).
-export([play_turn/1, get_stats/1]).

%% Per-player AI process state.
-record(state, {
    name,                     % player name
    personality,              % aggressive | conservative | balanced | adaptive
    learning_model,           % pid of the ml_engine process started in init/1
    game_history = [],        % plays made so far, most recent first
    play_stats = #{},         % aggregated statistics about plays
    current_game = undefined, % current game reference (never written in this chunk)
    adaptation_level = 0.0    % only updated for the adaptive personality
}).

%% Static trait weights per personality.
-define(PERSONALITY_TRAITS, #{
    aggressive => #{
        risk_tolerance => 0.8,
        combo_preference => 0.7,
        control_value => 0.6
    },
    conservative => #{
        risk_tolerance => 0.3,
        combo_preference => 0.4,
        control_value => 0.8
    },
    balanced => #{
        risk_tolerance => 0.5,
        combo_preference => 0.5,
        control_value => 0.5
    },
    adaptive => #{
        risk_tolerance => 0.5,
        combo_preference => 0.5,
        control_value => 0.5
    }
}).

%% API

%% Start an unregistered AI player process.
start_link(Name, Personality) ->
    gen_server:start_link(?MODULE, [Name, Personality], []).

%% Ask the player to take its turn (asynchronous).
play_turn(Pid) ->
    gen_server:cast(Pid, play_turn).

%% Fetch the player's compiled statistics (synchronous).
get_stats(Pid) ->
    gen_server:call(Pid, get_stats).

%% Callbacks

%% Starts a dedicated ml_engine process used later for scoring plays.
init([Name, Personality]) ->
    {ok, LearningModel} = ml_engine:start_link(),
    {ok, #state{
        name = Name,
        personality = Personality,
        learning_model = LearningModel
    }}.

%% Compute and execute the best move; the execute_play/2 result is ignored.
handle_cast(play_turn, State) ->
    {Play, NewState} = calculate_best_move(State),
    execute_play(Play, NewState),
    {noreply, NewState}.

handle_call(get_stats, _From, State) ->
    Stats = compile_statistics(State),
    {reply, {ok, Stats}, State}.
%% Advanced AI strategy implementation

%% Full decision pipeline: analyse the game, generate and score candidate
%% plays, bias scores by personality, pick a play, then learn from it.
calculate_best_move(State) ->
    GameState = analyze_game_state(State),
    Personality = get_personality_traits(State),
    % All candidate actions for this turn
    PossiblePlays = generate_possible_plays(State),
    % Score each candidate with the learning model
    RatedPlays = evaluate_plays(PossiblePlays, GameState, State),
    % Bias scores according to personality traits
    AdjustedPlays = adjust_by_personality(RatedPlays, Personality, State),
    % Choose the play to make
    BestPlay = select_best_play(AdjustedPlays, State),
    % Record the play and update the learning model
    NewState = update_state_and_learn(State, BestPlay, GameState),
    {BestPlay, NewState}.
%% Snapshot of the current game situation as a map of analysis results.
analyze_game_state(State) ->
    maps:from_list([
        {cards_in_hand, get_cards_in_hand(State)},
        {cards_played, get_cards_played(State)},
        {opponent_info, analyze_opponents(State)},
        {game_stage, determine_game_stage(State)},
        {control_status, analyze_control(State)}
    ]).

%% All legal plays for the current hand, with 'pass' always available.
generate_possible_plays(State) ->
    Hand = get_cards_in_hand(State),
    Previous = get_last_play(State),
    Legal = filter_valid_plays(card_rules:generate_combinations(Hand), Previous),
    [pass | Legal].

%% Pair every candidate play with its evaluated score.
evaluate_plays(Plays, GameState, State) ->
    [{Candidate, evaluate_single_play(Candidate, GameState, State)}
     || Candidate <- Plays].
%% Score a single candidate play as a weighted blend of factors.
%% NOTE(review): ml_engine:predict/2 (as defined in this codebase) replies
%% {ok, {prediction, Float}}, so BaseScore would bind to a tuple and the
%% arithmetic below would raise badarith -- confirm the predict contract.
evaluate_single_play(Play, GameState, State) ->
    % Base score from the machine-learning model
    Features = extract_features(Play, GameState),
    {ok, BaseScore} = ml_engine:predict(State#state.learning_model, Features),
    % Additional hand-crafted factors
    ControlScore = evaluate_control_value(Play, GameState),
    TempoScore = evaluate_tempo_value(Play, GameState),
    RiskScore = evaluate_risk_value(Play, GameState),
    % Weighted combination of all factors
    BaseScore * 0.4 + ControlScore * 0.3 + TempoScore * 0.2 + RiskScore * 0.1.
%% Rescale every play's score according to the player's personality traits.
adjust_by_personality(RatedPlays, Personality, State) ->
    Risk = maps:get(risk_tolerance, Personality),
    Combo = maps:get(combo_preference, Personality),
    Control = maps:get(control_value, Personality),
    [{Candidate, adjust_score_by_traits(Score, Candidate, Risk, Combo, Control, State)}
     || {Candidate, Score} <- RatedPlays].

%% Pick the final play; adaptive players use their own selection policy so
%% their behaviour stays hard to predict.
select_best_play(AdjustedPlays, #state{personality = adaptive} = State) ->
    select_adaptive_play(AdjustedPlays, State);
select_best_play(AdjustedPlays, State) ->
    select_personality_based_play(AdjustedPlays, State).
%% Record the chosen play, refresh statistics and feed the learning model.
%% NOTE(review): ml_engine exports update_model/2 (Pid, ModelData) but is
%% called here with three arguments -- this call would fail with undef;
%% confirm the intended ml_engine API.
update_state_and_learn(State, Play, GameState) ->
    % Prepend the play to the history (most recent first)
    NewHistory = [Play | State#state.game_history],
    % Refresh aggregate play statistics
    NewStats = update_play_stats(State#state.play_stats, Play),
    % Adaptive players also move their adaptation level
    NewAdaptationLevel = case State#state.personality of
        adaptive ->
            update_adaptation_level(State#state.adaptation_level, Play, GameState);
        _ ->
            State#state.adaptation_level
    end,
    % Feed the outcome back into the learning model
    Features = extract_features(Play, GameState),
    Reward = calculate_play_reward(Play, GameState),
    ml_engine:update_model(State#state.learning_model, Features, Reward),
    State#state{
        game_history = NewHistory,
        play_stats = NewStats,
        adaptation_level = NewAdaptationLevel
    }.
%% ===== module: advanced_ai_strategy =====
-module(advanced_ai_strategy).
-export([init_strategy/0, analyze_situation/2, make_decision/2, learn_from_game/2]).

%% Top-level strategy state carried between calls.
-record(advanced_ai_state, {
    strategy_model,      % strategy model parameters
    situation_model,     % situation-analysis model
    learning_model,      % reinforcement-learning model
    pattern_database,    % card-pattern database
    opponent_models,     % per-opponent behaviour models
    game_history = []    % game history
}).

%% Initialisation helpers

%% Default strategy model: neutral risk/aggression/defence factors.
init_strategy_model() ->
    #{
        parameters => #{
            risk_factor => 0.5,
            aggressive_factor => 0.5,
            defensive_factor => 0.5
        },
        history => []
    }.

%% Default situation model: weights used to blend analysis dimensions.
init_situation_model() ->
    #{
        analysis_weights => #{
            hand_strength => 0.3,
            control_level => 0.3,
            tempo => 0.2,
            position => 0.2
        },
        historical_data => []
    }.

%% Default learning model: standard RL hyper-parameters, empty buffers.
init_learning_model() ->
    #{
        learning_rate => 0.01,
        discount_factor => 0.9,
        exploration_rate => 0.1,
        model_weights => #{},
        experience_buffer => []
    }.

%% Pattern database assembled from the init_* pattern helpers below.
init_pattern_database() ->
    #{
        basic_patterns => init_basic_patterns(),
        complex_patterns => init_complex_patterns(),
        pattern_weights => init_pattern_weights()
    }.
%% Build a fresh AI strategy state with all sub-models initialised and no
%% opponents modelled yet.
init_strategy() ->
    StrategyModel = init_strategy_model(),
    SituationModel = init_situation_model(),
    LearningModel = init_learning_model(),
    PatternDB = init_pattern_database(),
    #advanced_ai_state{
        strategy_model = StrategyModel,
        situation_model = SituationModel,
        learning_model = LearningModel,
        pattern_database = PatternDB,
        opponent_models = #{}
    }.
%% Situation analysis combining base, opponent and pattern views plus an
%% estimated win probability and suggested strategies.
analyze_situation(State, GameState) ->
    BaseAnalysis = basic_situation_analysis(GameState),
    OpponentAnalysis = analyze_opponents(State, GameState),
    PatternAnalysis = analyze_card_patterns(State, GameState),
    WinProbability = calculate_win_probability(State, BaseAnalysis, OpponentAnalysis),
    #{
        base_analysis => BaseAnalysis,
        opponent_analysis => OpponentAnalysis,
        pattern_analysis => PatternAnalysis,
        win_probability => WinProbability,
        suggested_strategies => suggest_strategies(State, WinProbability)
    }.

%% Decision pipeline: situation analysis -> candidate actions -> MCTS
%% evaluation -> RL refinement -> final selection.
make_decision(State, GameState) ->
    % Current situation assessment
    SituationAnalysis = analyze_situation(State, GameState),
    % Enumerate candidate actions
    PossibleActions = generate_possible_actions(GameState),
    % Evaluate candidates with Monte Carlo tree search
    EvaluatedActions = monte_carlo_tree_search(State, PossibleActions, GameState),
    % Refine scores with the reinforcement-learning model
    RefinedActions = apply_reinforcement_learning(State, EvaluatedActions),
    % Pick the best action
    select_best_action(RefinedActions, SituationAnalysis).

%% Post-game learning: refresh opponent models, strategy model and the
%% pattern database.
%% NOTE(review): apply_deep_learning_update/2's return is discarded and the
%% learning_model field is never written back -- confirm that is intended.
learn_from_game(State, GameRecord) ->
    % Update opponent behaviour models
    UpdatedOpponentModels = update_opponent_models(State, GameRecord),
    % Update the strategy model
    UpdatedStrategyModel = update_strategy_model(State, GameRecord),
    % Update the card-pattern database
    UpdatedPatternDB = update_pattern_database(State, GameRecord),
    % Apply the deep-learning update (side effects only)
    apply_deep_learning_update(State, GameRecord),
    State#advanced_ai_state{
        strategy_model = UpdatedStrategyModel,
        opponent_models = UpdatedOpponentModels,
        pattern_database = UpdatedPatternDB
    }.
%% Internal functions

%% Basic situation analysis: hand strength, control, stage and key cards.
basic_situation_analysis(GameState) ->
    HandStrength = evaluate_hand_strength(GameState),
    ControlLevel = evaluate_control_level(GameState),
    Stage = determine_game_stage(GameState),
    KeyCards = analyze_remaining_key_cards(GameState),
    #{
        hand_strength => HandStrength,
        control_level => ControlLevel,
        game_stage => Stage,
        remaining_key_cards => KeyCards
    }.

%% Analyse every opponent, falling back to a fresh model for unknown players.
analyze_opponents(State, GameState) ->
    Models = State#advanced_ai_state.opponent_models,
    [analyze_single_opponent(
         maps:get(Opponent, Models, create_new_opponent_model()),
         Opponent,
         GameState)
     || Opponent <- get_opponents(GameState)].

%% Pattern analysis of the current hand against the pattern database.
analyze_card_patterns(State, GameState) ->
    Hand = get_current_hand(GameState),
    DB = State#advanced_ai_state.pattern_database,
    #{
        available_patterns => find_available_patterns(Hand, DB),
        pattern_strength => evaluate_pattern_strength(Hand, DB),
        combo_opportunities => identify_combo_opportunities(Hand, DB)
    }.
%% Empty containers for the basic card patterns.
init_basic_patterns() ->
    maps:from_list([{Kind, []} || Kind <- [singles, pairs, triples, sequences, bombs]]).

%% Empty containers for the complex card patterns.
init_complex_patterns() ->
    maps:from_list([{Kind, []} || Kind <- [airplane, four_with_two, three_with_one, double_sequence]]).

%% Relative weight of each pattern class when scoring a hand.
init_pattern_weights() ->
    maps:from_list([
        {bomb, 1.0},
        {sequence, 0.8},
        {triple, 0.6},
        {pair, 0.4},
        {single, 0.2}
    ]).
%% All candidate actions given the current hand and the play to beat.
generate_possible_actions(GameState) ->
    generate_valid_plays(get_current_hand(GameState), get_last_play(GameState)).

%% Leading: anything goes; following: only plays that beat LastPlay.
generate_valid_plays(Cards, []) ->
    generate_all_plays(Cards);
generate_valid_plays(Cards, LastPlay) ->
    generate_greater_plays(Cards, LastPlay).

%% Current hand from the game state (empty when absent).
get_current_hand(GameState) ->
    maps:get(hand_cards, GameState, []).

%% Previous play from the game state (empty when nobody has led).
get_last_play(GameState) ->
    maps:get(last_play, GameState, []).

%% Opponent list from the game state (empty when absent).
get_opponents(GameState) ->
    maps:get(opponents, GameState, []).
%% Build a per-opponent report from a behaviour model and the game state.
analyze_single_opponent(Model, Opponent, GameState) ->
    Style = analyze_play_style(Model, Opponent),
    Remaining = estimate_remaining_cards(Model, GameState),
    Threat = calculate_threat_level(Model, GameState),
    #{
        play_style => Style,
        remaining_cards => Remaining,
        threat_level => Threat
    }.

%% Win probability: weighted blend of hand strength, control level and the
%% inverse of the average opponent threat, then strategy-adjusted.
calculate_win_probability(State, BaseAnalysis, OpponentAnalysis) ->
    #{hand_strength := HandStrength, control_level := ControlLevel} = BaseAnalysis,
    ThreatLevel = calculate_average_threat(OpponentAnalysis),
    Blended = HandStrength * 0.4 + ControlLevel * 0.3 + (1 - ThreatLevel) * 0.3,
    adjust_probability(Blended, State).
%% Average threat level across all opponent analyses.
%%
%% Fix: the original returned the atom 'false' for an empty list (it used
%% `length(L) > 0 andalso Total / length(L)`), leaking a boolean into the
%% arithmetic in calculate_win_probability/3. An empty analysis list now
%% yields 0.0.
calculate_average_threat([]) ->
    0.0;
calculate_average_threat(OpponentAnalysis) ->
    TotalThreat = lists:foldl(
        fun(Analysis, Acc) ->
            Acc + maps:get(threat_level, Analysis, 0.0)
        end,
        0.0,
        OpponentAnalysis
    ),
    TotalThreat / length(OpponentAnalysis).
%% Clamp the strategy-adjusted probability into [0.0, 1.0].
adjust_probability(BaseProb, State) ->
    Delta = calculate_strategy_adjustment(State#advanced_ai_state.strategy_model),
    min(1.0, max(0.0, BaseProb + Delta)).
%% Risk-based adjustment: maps risk_factor 0..1 onto -0.1..+0.1, defaulting
%% to a neutral 0.5 when the model carries no parameters.
calculate_strategy_adjustment(StrategyModel) ->
    Params = maps:get(parameters, StrategyModel, #{}),
    0.2 * (maps:get(risk_factor, Params, 0.5) - 0.5).
%% Pick the action with the highest score; 'pass' when nothing is playable.
%%
%% Fix: the original used lists:max/1 on {Action, Score} tuples, which
%% compares by the Action term first and only uses Score as a tie-break --
%% it selected the "largest" action, not the best-scored one. The maximum
%% is now taken over the Score element. The situation analysis is currently
%% unused (underscored to silence the compiler warning).
select_best_action([], _SituationAnalysis) ->
    pass;
select_best_action([First | Rest], _SituationAnalysis) ->
    {BestAction, _BestScore} =
        lists:foldl(
            fun({_, Score} = Candidate, {_, Best}) when Score > Best -> Candidate;
               (_Candidate, Acc) -> Acc
            end,
            First,
            Rest
        ),
    BestAction.
%% Monte Carlo tree search: attach a simulation score to every action.
monte_carlo_tree_search(State, Actions, GameState) ->
    MaxIterations = 1000,
    lists:map(
        fun(Action) ->
            Score = run_mcts_simulation(State, Action, GameState, MaxIterations),
            {Action, Score}
        end,
        Actions
    ).

%% Run MaxIterations simulations from a fresh root node and return the last
%% computed UCB score.
%% NOTE(review): the fold accumulator is overwritten on every iteration and
%% the result of update_mcts_statistics/2 is discarded, so unless Root is a
%% mutable handle (ets table / process) every iteration repeats identical
%% work -- confirm the node representation.
run_mcts_simulation(State, Action, GameState, MaxIterations) ->
    Root = create_mcts_node(GameState, Action),
    lists:foldl(
        fun(_, Score) ->
            SimulationResult = simulate_game(State, Root),
            update_mcts_statistics(Root, SimulationResult),
            calculate_ucb_score(Root)
        end,
        0,
        lists:seq(1, MaxIterations)
    ).
%% Refine every MCTS score through the learning policy.
apply_reinforcement_learning(State, EvaluatedActions) ->
    LearningModel = State#advanced_ai_state.learning_model,
    lists:map(
        fun({Action, Score}) ->
            RefinedScore = apply_learning_policy(LearningModel, Action, Score),
            {Action, RefinedScore}
        end,
        EvaluatedActions
    ).

%% Deep-learning update pipeline: extract features, build training data,
%% push into the learning model. Runs for side effects only; the caller
%% discards the return value.
apply_deep_learning_update(State, GameRecord) ->
    % Extract features from the finished game
    Features = extract_game_features(GameRecord),
    % Prepare training data
    TrainingData = prepare_training_data(Features, GameRecord),
    % Update the model
    update_deep_learning_model(State#advanced_ai_state.learning_model, TrainingData).
%% Map win probability onto a high/medium/low strategy suggestion.
%%
%% Fix: the State argument was bound but never used, triggering an
%% "unused variable" compiler warning; it is now underscored. The numeric
%% case expression is rewritten as guard clauses on the function heads.
suggest_strategies(_State, WinProbability) when WinProbability >= 0.7 ->
    [aggressive_push, maintain_control];
suggest_strategies(_State, WinProbability) when WinProbability >= 0.4 ->
    [balanced_play, seek_opportunities];
suggest_strategies(_State, _WinProbability) ->
    [defensive_play, preserve_key_cards].
%% Advanced pattern recognition over a hand of cards: base and complex
%% patterns, special combinations, and a combined valuation.
identify_advanced_patterns(Cards, PatternDB) ->
    Base = find_base_patterns(Cards),
    Complex = find_complex_patterns(Cards),
    Specials = find_special_combinations(Cards, PatternDB),
    Value = evaluate_pattern_combination_value(Base, Complex, Specials),
    #{
        base_patterns => Base,
        complex_patterns => Complex,
        special_combos => Specials,
        pattern_value => Value
    }.
%% Fresh opponent model with neutral priors and no recorded history.
create_new_opponent_model() ->
    maps:from_list([
        {play_style, undefined},
        {pattern_preferences, #{}},
        {risk_tendency, 0.5},
        {skill_level, 0.5},
        {historical_plays, []}
    ]).
%% Fold every play in the game record into the opponent models.
update_opponent_models(State, GameRecord) ->
    Plays = extract_plays(GameRecord),
    Initial = State#advanced_ai_state.opponent_models,
    lists:foldl(
        fun(Play, Models) -> update_single_opponent_model(Models, Play) end,
        Initial,
        Plays
    ).

%% Strategy effectiveness: weighted blend of control, tempo and risk.
evaluate_strategy_effectiveness(Strategy, GameState) ->
    Control = evaluate_control_factor(Strategy, GameState),
    Tempo = evaluate_tempo_factor(Strategy, GameState),
    Risk = evaluate_risk_factor(Strategy, GameState),
    Control * 0.4 + Tempo * 0.3 + Risk * 0.3.
%% ===== module: doudizhu_ai =====
-module(doudizhu_ai).
-behaviour(gen_server).

%% Public API
-export([
    start_link/1,
    make_decision/2,
    analyze_cards/1,
    update_game_state/2
]).

%% gen_server callbacks
-export([
    init/1,
    handle_call/3,
    handle_cast/2,
    handle_info/2,
    terminate/2,
    code_change/3
]).

%% The #state{} record used below is expected to come from this header --
%% TODO confirm.
-include("card_types.hrl").

%% API functions

%% NOTE(review): registers under the module name, so only one AI player per
%% node is possible even though a PlayerId is accepted -- confirm intent.
start_link(PlayerId) ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, [PlayerId], []).

%% Synchronously ask the AI for its next move.
make_decision(GameState, Options) ->
    gen_server:call(?MODULE, {make_decision, GameState, Options}).

%% Synchronously analyse a hand of cards.
analyze_cards(Cards) ->
    gen_server:call(?MODULE, {analyze_cards, Cards}).

%% Asynchronously feed a game event into the AI's knowledge.
update_game_state(Event, Data) ->
    gen_server:cast(?MODULE, {update_game_state, Event, Data}).

%% Callback functions

init([PlayerId]) ->
    {ok, #state{player_id = PlayerId}}.

handle_call({make_decision, GameState, Options}, _From, State) ->
    {Decision, NewState} = calculate_best_move(GameState, Options, State),
    {reply, Decision, NewState};
handle_call({analyze_cards, Cards}, _From, State) ->
    Analysis = perform_cards_analysis(Cards, State),
    {reply, Analysis, State};
handle_call(_Request, _From, State) ->
    {reply, ok, State}.

handle_cast({update_game_state, Event, Data}, State) ->
    NewState = process_game_event(Event, Data, State),
    {noreply, NewState};
handle_cast(_Msg, State) ->
    {noreply, State}.

handle_info(_Info, State) ->
    {noreply, State}.

terminate(_Reason, _State) ->
    ok.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
%% Internal functions

%% Compute the best move via the strategy module and cache the strategy.
%%
%% Fix: Options (here) and State (in perform_cards_analysis/2) were bound
%% but never used, producing compiler warnings; both are now underscored.
calculate_best_move(GameState, _Options, State) ->
    % Delegate strategy choice and execution to the strategy module
    Strategy = doudizhu_ai_strategy:choose_strategy(GameState, State),
    Move = doudizhu_ai_strategy:execute_strategy(Strategy, GameState, State),
    {Move, State#state{strategy_cache = #{current_strategy => Strategy}}}.

%% Simplified hand analysis: overall hand value only.
perform_cards_analysis(Cards, _State) ->
    HandValue = doudizhu_ai_strategy:analyze_hand_value(Cards),
    {hand_value, HandValue}.
%% Apply a game event to the AI state.
%% card_played: remember which cards each player has shown.
process_game_event(card_played, {Player, Cards}, State) ->
    % Track the known (played) cards, most recent first
    NewPlayedCards = [{Player, Cards} | State#state.played_cards],
    State#state{played_cards = NewPlayedCards};
%% new_hand: replace the current hand.
process_game_event(new_hand, Cards, State) ->
    State#state{hand_cards = Cards};
%% role_assigned: dizhu (landlord) or nongmin (farmer).
process_game_event(role_assigned, Role, State) ->
    State#state{role = Role};
%% Any other event leaves the state untouched.
process_game_event(_, _, State) ->
    State.
%% ===== module: ml_engine =====
-module(ml_engine).
-behaviour(gen_server).
-export([start_link/0, init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]).
%% NOTE(review): update_model/2 is exported here, but advanced_ai_player
%% calls ml_engine:update_model/3 -- one of the two sides needs fixing.
-export([train/2, predict/2, update_model/2]).

%% Engine state: the model plus bookkeeping.
-record(state, {
    model_type = basic,        % model flavour (only 'basic' used here)
    model_data = #{},          % learned parameters
    training_history = [],     % raw training batches, most recent first
    performance_metrics = #{}  % last computed metrics (never written here)
}).

%% API

start_link() ->
    gen_server:start_link(?MODULE, [], []).

%% Train on a batch; replies {ok, Metrics}.
train(Pid, TrainingData) ->
    gen_server:call(Pid, {train, TrainingData}).

%% Predict for some input; replies {ok, Result}.
predict(Pid, InputData) ->
    gen_server:call(Pid, {predict, InputData}).

%% Replace the model data wholesale; replies ok.
update_model(Pid, ModelData) ->
    gen_server:call(Pid, {update_model, ModelData}).

%% Callbacks

init([]) ->
    {ok, #state{}}.

%% Simplified training: merge the batch into the model and keep history.
handle_call({train, TrainingData}, _From, State) ->
    NewModelData = train_model(State#state.model_data, TrainingData),
    NewHistory = [TrainingData | State#state.training_history],
    NewState = State#state{
        model_data = NewModelData,
        training_history = NewHistory
    },
    {reply, {ok, compute_metrics(NewState)}, NewState};
handle_call({predict, InputData}, _From, State) ->
    Result = predict_with_model(State#state.model_data, InputData),
    {reply, {ok, Result}, State};
handle_call({update_model, ModelData}, _From, State) ->
    NewState = State#state{model_data = ModelData},
    {reply, ok, NewState};
handle_call(_Request, _From, State) ->
    {reply, {error, unknown_call}, State}.

handle_cast(_Msg, State) ->
    {noreply, State}.

handle_info(_Info, State) ->
    {noreply, State}.

terminate(_Reason, _State) ->
    ok.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
%% Internal functions

%% Simplified "training": merge the batch into the model parameters.
%% A real implementation would run an actual learning algorithm.
%%
%% Fix: predict_with_model/2 and compute_metrics/1 ignored their arguments
%% without underscoring them, producing unused-variable warnings.
train_model(ModelData, TrainingData) ->
    maps:merge(ModelData, TrainingData).

%% Simplified prediction: constant stub score regardless of input.
predict_with_model(_ModelData, _InputData) ->
    {prediction, 0.75}.

%% Simplified performance metrics: constant stub values.
compute_metrics(_State) ->
    #{accuracy => 0.8, loss => 0.2}.
%% ===== module: performance_monitor =====
-module(performance_monitor).
-behaviour(gen_server).
%% NOTE(review): the gen_server callbacks (init/1, handle_call/3, ...) are
%% exported but not defined in this chunk -- confirm they exist elsewhere;
%% otherwise the module will not compile and the calls below will fail.
-export([start_link/0, init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]).
-export([start_monitoring/1, stop_monitoring/1, get_metrics/1, generate_report/1]).

%% Server state.
-record(state, {
    monitors = #{},        % monitored targets
    metrics = #{},         % collected metric data
    alerts = [],           % raised alerts
    start_time = undefined
}).

%% Per-target monitoring configuration.
-record(monitor_data, {
    type,             % monitor type
    metrics = [],     % metric names to collect
    threshold = #{},  % alert thresholds
    callback          % callback invoked on events
}).

%% API

start_link() ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

start_monitoring(Target) ->
    gen_server:call(?MODULE, {start_monitoring, Target}).

stop_monitoring(Target) ->
    gen_server:call(?MODULE, {stop_monitoring, Target}).

get_metrics(Target) ->
    gen_server:call(?MODULE, {get_metrics, Target}).

generate_report(Target) ->
    gen_server:call(?MODULE, {generate_report, Target}).
%% Internal functions

%% Gather the full metric set for a target.
collect_metrics(Target) ->
    maps:from_list([
        {cpu_usage, get_cpu_usage(Target)},
        {memory_usage, get_memory_usage(Target)},
        {response_time, get_response_time(Target)},
        {throughput, get_throughput(Target)}
    ]).

%% Derive summary statistics from the raw metrics.
analyze_performance(Metrics) ->
    ResponseTimes = maps:get(response_time, Metrics),
    MemoryUsage = maps:get(memory_usage, Metrics),
    #{
        avg_response_time => calculate_average(ResponseTimes),
        peak_memory => get_peak_value(MemoryUsage),
        bottlenecks => identify_bottlenecks(Metrics)
    }.

%% Raise an alert for every metric that crosses its threshold.
generate_alerts(Metrics, Thresholds) ->
    Check = fun({Metric, Value}) ->
        case check_threshold(Metric, Value, Thresholds) of
            {true, Alert} -> {true, Alert};
            false -> false
        end
    end,
    lists:filtermap(Check, maps:to_list(Metrics)).

%% Assemble the full performance report for a target.
create_report(Target, Metrics) ->
    Analysis = analyze_performance(Metrics),
    Recommendations = generate_recommendations(Metrics),
    #{
        target => Target,
        timestamp => os:timestamp(),
        metrics => Metrics,
        analysis => Analysis,
        recommendations => Recommendations
    }.
%% ===== module: performance_optimization =====
-module(performance_optimization).
-behaviour(gen_server).
-export([start_link/0, init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]).
-export([optimize_resources/0, get_performance_stats/0]).

%% Server state.
-record(state, {
    resource_usage = #{},     % latest resource snapshot
    optimization_rules = #{}, % rules consulted when optimizing (never populated here)
    performance_history = []  % {Timestamp, Actions} log, capped at 100 entries
}).

start_link() ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%% Arms the first periodic optimization pass (60s tick via handle_info).
init([]) ->
    schedule_optimization(),
    {ok, #state{}}.

%% Trigger an immediate optimization pass (asynchronous).
optimize_resources() ->
    gen_server:cast(?MODULE, optimize).

%% Fetch the optimization history.
get_performance_stats() ->
    gen_server:call(?MODULE, get_stats).
%% Internal functions

%% Arm the next periodic optimization tick (60 seconds).
schedule_optimization() ->
    erlang:send_after(60000, self(), run_optimization).

%% Snapshot CPU, memory and process-count usage via os_mon.
%%
%% Fix: cpu_sup:util([detailed]) returns {Cpus, Busy, NonBusy, Misc} (or
%% {error, Reason}) and memsup:get_system_memory_data() returns its data
%% directly -- neither is wrapped in {ok, _}, so the original matches
%% always crashed with badmatch.
analyze_resource_usage() ->
    Usage = cpu_sup:util([detailed]),
    Memory = memsup:get_system_memory_data(),
    #{
        cpu => Usage,
        memory => Memory,
        process_count => erlang:system_info(process_count)
    }.
%% Synchronous calls
handle_call(get_stats, _From, State) ->
    {reply, {ok, State#state.performance_history}, State};
handle_call(_Request, _From, State) ->
    {reply, {error, unknown_call}, State}.

%% Asynchronous messages: run one optimization pass on demand.
handle_cast(optimize, State) ->
    ResourceUsage = analyze_resource_usage(),
    OptimizationActions = calculate_optimization_actions(ResourceUsage, State#state.optimization_rules),
    NewState = apply_optimization_actions(OptimizationActions, State),
    {noreply, NewState#state{resource_usage = ResourceUsage}};
handle_cast(_Msg, State) ->
    {noreply, State}.

%% Timer tick: run a pass and re-arm the 60s timer.
handle_info(run_optimization, State) ->
    ResourceUsage = analyze_resource_usage(),
    OptimizationActions = calculate_optimization_actions(ResourceUsage, State#state.optimization_rules),
    NewState = apply_optimization_actions(OptimizationActions, State),
    schedule_optimization(),
    {noreply, NewState#state{resource_usage = ResourceUsage}};
handle_info(_Info, State) ->
    {noreply, State}.

%% Termination callback
terminate(_Reason, _State) ->
    ok.

%% Code-change callback
code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
%% Helper functions

%% Decide which optimization actions to take.
%% Simplified implementation: always rebalance load and free memory.
%%
%% Fix: both arguments were bound but unused, producing compiler warnings;
%% they are now underscored.
calculate_optimization_actions(_ResourceUsage, _Rules) ->
    [balance_load, free_memory].
%% Log the applied actions, keeping only the 100 most recent entries.
apply_optimization_actions(Actions, State) ->
    Entry = {os:timestamp(), Actions},
    History = lists:sublist([Entry | State#state.performance_history], 100),
    State#state{performance_history = History}.
%% ===== module: xx (truncated in this view) =====
-module(xx).
-export([
    init/1,
    make_decision/2,
    evaluate_hand/1,
    predict_plays/2,
    update_knowledge/2,
    calculate_win_rate/1
]).
-include("card_types.hrl").
%% NOTE(review): -import is discouraged; prefer explicit game_logic:... calls.
-import(game_logic, [validate_play/2, analyze_card_pattern/1, calculate_card_value/1, evaluate_hand_strength/1,find_singles/1]).

%% AI knowledge carried between decisions.
-record(ai_state, {
    role,                 % dizhu (landlord) | nongmin (farmer)
    hand_cards = [],      % current hand
    known_cards = [],     % cards known from deals/exposure
    played_cards = [],    % cards already played
    player_history = [],  % per-player play history
    game_stage,           % early_game | mid_game | end_game
    strategy_cache = #{}  % memoised strategy data
}).

%% Per-turn decision context.
-record(play_context, {
    last_play,          % previous play to beat (none when leading)
    cards_remaining,    % number of cards left in our hand
    num_greater_cards,  % how many of our cards beat the last play
    control_factor,     % how much control we currently hold
    must_play = false   % true when passing is not allowed
}).

%% Initialise AI state for the given role.
init(Role) ->
    #ai_state{
        role = Role,
        game_stage = early_game
    }.
%% Decide this turn's move: the best candidate play, or pass.
make_decision(AIState, GameState) ->
    Context = analyze_context(AIState, GameState),
    case should_play(Context, AIState) of
        false -> {pass, AIState};
        true -> select_best_play(Context, AIState)
    end.
%% Evaluate the overall value of a hand.
evaluate_hand(Cards) ->
    Components = analyze_components(Cards),
    calculate_hand_value(Components).

%% Predict and score the plays possible against LastPlay.
predict_plays(Cards, LastPlay) ->
    ValidPlays = generate_valid_plays(Cards, LastPlay),
    score_potential_plays(ValidPlays, LastPlay).

%% Fold a game event into the AI's knowledge.
update_knowledge(AIState, Event) ->
    update_state_with_event(AIState, Event).

%% Estimate the probability of winning from the current state.
calculate_win_rate(AIState) ->
    HandStrength = evaluate_hand(AIState#ai_state.hand_cards),
    PositionValue = evaluate_position(AIState),
    ControlValue = evaluate_control(AIState),
    calculate_probability(HandStrength, PositionValue, ControlValue).

%% Internal functions

%% Build the per-turn decision context from the AI and game state.
analyze_context(AIState, GameState) ->
    LastPlay = get_last_play(GameState),
    CardsRemaining = length(AIState#ai_state.hand_cards),
    GreaterCards = count_greater_cards(AIState#ai_state.hand_cards, LastPlay),
    ControlFactor = calculate_control_factor(AIState, GameState),
    #play_context{
        last_play = LastPlay,
        cards_remaining = CardsRemaining,
        num_greater_cards = GreaterCards,
        control_factor = ControlFactor,
        must_play = must_play(AIState, GameState)
    }.
%% Decide whether to play at all this turn: always when forced, always
%% when leading, otherwise only if beating the last play seems worth it.
should_play(#play_context{must_play = true}, _AIState) ->
    true;
should_play(#play_context{last_play = none}, _AIState) ->
    true;
should_play(Context, AIState) ->
    should_beat_last_play(Context, AIState).
%% Score every candidate play and pick the highest-scoring one.
select_best_play(Context, AIState) ->
    Candidates = generate_candidates(AIState#ai_state.hand_cards, Context),
    Scored = lists:map(
        fun(Play) -> {score_play(Play, Context, AIState), Play} end,
        Candidates),
    select_highest_scored_play(Scored, AIState).
%% Decompose a hand into its pattern components — singles, pairs,
%% triples, sequences and bombs — keyed in the returned map.
analyze_components(Cards) ->
    Grouped = group_cards_by_value(Cards),
    maps:from_list([
        {singles, find_singles(Grouped)},
        {pairs, find_pairs(Grouped)},
        {triples, find_triples(Grouped)},
        {sequences, find_sequences(Grouped)},
        {bombs, find_bombs(Grouped)}
    ]).
%% Total value of a hand: sum the value of every component class.
calculate_hand_value(Components) ->
    Valuers = [
        {singles, fun calculate_singles_value/1},
        {pairs, fun calculate_pairs_value/1},
        {triples, fun calculate_triples_value/1},
        {sequences, fun calculate_sequences_value/1},
        {bombs, fun calculate_bombs_value/1}
    ],
    lists:sum([F(maps:get(Key, Components, [])) || {Key, F} <- Valuers]).
%% All legal plays: leading plays when there is nothing to beat,
%% otherwise plays that answer LastPlay.
generate_valid_plays(Cards, none) ->
    generate_leading_plays(Cards);
generate_valid_plays(Cards, LastPlay) ->
    generate_following_plays(Cards, LastPlay).
%% Pair each play with its potential score against LastPlay.
score_potential_plays(Plays, LastPlay) ->
    lists:map(fun(Play) -> {Play, score_play_potential(Play, LastPlay)} end,
              Plays).
%% Fold one game event into the AI state; unrecognised events are
%% ignored. Play events append the cards to played_cards and prepend
%% the play to the history (newest first).
update_state_with_event(#ai_state{played_cards = Played,
                                  player_history = History} = State,
                        {play_cards, Player, Cards}) ->
    State#ai_state{
        played_cards = Played ++ Cards,
        player_history = [{Player, Cards} | History]
    };
update_state_with_event(State, {game_stage, NewStage}) ->
    State#ai_state{game_stage = NewStage};
update_state_with_event(State, _Event) ->
    State.
%% Weighted blend of hand strength (50%), position (30%) and control
%% (20%), clamped to [0.0, 1.0].
calculate_probability(HandStrength, PositionValue, ControlValue) ->
    Weighted = HandStrength * 0.5 + PositionValue * 0.3 + ControlValue * 0.2,
    normalize_probability(Weighted).
%% Control factor: share of control cards (2s and jokers) in our hand,
%% scaled by a positional bonus.
%% Fix: removed the unused TotalCards binding — the result of
%% count_total_remaining_cards/1 was never used (compiler warning).
calculate_control_factor(AIState, GameState) ->
    ControlCards = count_control_cards(AIState#ai_state.hand_cards),
    RemainingCards = length(AIState#ai_state.hand_cards),
    ControlRatio = ControlCards / max(1, RemainingCards),
    PositionBonus = calculate_position_bonus(AIState, GameState),
    ControlRatio * PositionBonus.
%% We are forced to lead when it is our turn and no play is pending.
must_play(AIState, GameState) ->
    case is_current_player(AIState, GameState) of
        true -> not has_active_play(GameState);
        false -> false
    end.
%% When following, decide whether beating the last play is worthwhile,
%% based on hand strength, control level and how many cards we hold.
should_beat_last_play(#play_context{last_play = none}, _AIState) ->
    true;
should_beat_last_play(#play_context{last_play = LastPlay} = Ctx, AIState) ->
    Strength = evaluate_hand(AIState#ai_state.hand_cards),
    should_beat(Strength,
                Ctx#play_context.control_factor,
                Ctx#play_context.cards_remaining,
                LastPlay).
%% Candidate plays for this turn, filtered by the game-stage policy.
generate_candidates(Cards, #play_context{last_play = none} = Context) ->
    filter_candidates(generate_leading_plays(Cards), Context);
generate_candidates(Cards, #play_context{last_play = LastPlay} = Context) ->
    filter_candidates(generate_following_plays(Cards, LastPlay), Context).
%% Total score of a play: base 40%, tempo 20%, control 30%,
%% efficiency 10%, then adjusted for role and game stage.
score_play(Play, Context, AIState) ->
    WeightedParts = [
        calculate_base_score(Play) * 0.4,
        calculate_tempo_score(Play, Context) * 0.2,
        calculate_control_score(Play, Context, AIState) * 0.3,
        calculate_efficiency_score(Play, Context) * 0.1
    ],
    adjust_score_for_context(lists:sum(WeightedParts), Play, Context, AIState).
%% Take the highest-scoring play if it is worth anything (score > 0);
%% otherwise pass. The comparator sorts descending by score, keeping
%% the original order among equal scores.
select_highest_scored_play(ScoredPlays, AIState) ->
    Descending = fun({ScoreA, _}, {ScoreB, _}) -> ScoreA >= ScoreB end,
    case lists:sort(Descending, ScoredPlays) of
        [{Best, Play} | _] when Best > 0 ->
            {play, Play, update_after_play(Play, AIState)};
        _ ->
            {pass, AIState}
    end.
%% Value of lone singles; 2s and jokers are weighted 1.5x.
calculate_singles_value(Singles) ->
    lists:sum([single_worth(V) || {V, _} <- Singles]).
%% Worth of one single card by face value.
single_worth(V) when V >= ?CARD_2 -> V * 1.5;
single_worth(V) -> V.
%% Pairs are worth 2.2x their face value.
calculate_pairs_value(Pairs) ->
    lists:sum([V * 2.2 || {V, _} <- Pairs]).
%% Triples are worth 3.5x their face value.
calculate_triples_value(Triples) ->
    lists:sum([V * 3.5 || {V, _} <- Triples]).
%% Straights are worth face value times run length, weighted 1.8x.
calculate_sequences_value(Sequences) ->
    lists:sum([V * length(Run) * 1.8 || {V, Run} <- Sequences]).
%% Bombs are worth 10x their face value.
calculate_bombs_value(Bombs) ->
    lists:sum([V * 10.0 || {V, _} <- Bombs]).
%% Every play type we could lead with from this hand.
generate_leading_plays(Cards) ->
    Components = analyze_components(Cards),
    lists:append([
        generate_single_plays(Components),
        generate_pair_plays(Components),
        generate_triple_plays(Components),
        generate_sequence_plays(Components),
        generate_bomb_plays(Components)
    ]).
%% Plays that can legally follow LastPlay: same-type plays with a
%% higher value, plus bombs/rocket where they actually beat it.
%% Fixes:
%% - dropped the unused `= LastPlay` binding (compiler warning);
%% - bombs beat any non-bomb, only the rocket beats a bomb, and nothing
%%   beats the rocket. The old code appended every bomb unconditionally,
%%   offering illegal (smaller/equal) bombs when following a bomb,
%%   bombs when following the rocket, and duplicating the greater bombs
%%   already returned by find_greater_plays/3.
generate_following_plays(Cards, {Type, Value, _}) ->
    SameType = find_greater_plays(Cards, Type, Value),
    Extra = case Type of
        ?CARD_TYPE_ROCKET -> [];
        ?CARD_TYPE_BOMB -> find_rocket_play(Cards);
        _ -> find_bomb_plays(Cards) ++ find_rocket_play(Cards)
    end,
    SameType ++ Extra.
%% Base score: face value times card count, scaled by a per-type
%% multiplier expressed in percent.
calculate_base_score({Type, Value, Cards}) ->
    Value * length(Cards) * maps:get(Type, type_multipliers()) / 100.0.
%% Static score multiplier for each play type (percent).
type_multipliers() ->
    #{
        ?CARD_TYPE_ROCKET => 100.0,
        ?CARD_TYPE_BOMB => 80.0,
        ?CARD_TYPE_STRAIGHT => 40.0,
        ?CARD_TYPE_STRAIGHT_PAIR => 35.0,
        ?CARD_TYPE_PLANE => 30.0,
        ?CARD_TYPE_THREE_TWO => 25.0,
        ?CARD_TYPE_THREE_ONE => 20.0,
        ?CARD_TYPE_THREE => 15.0,
        ?CARD_TYPE_PAIR => 10.0,
        ?CARD_TYPE_SINGLE => 5.0
    }.
%% Tempo score of a play, delegated to the handler for the current
%% game stage.
%% NOTE(review): this reads Context#play_context.game_stage, but the
%% #play_context{} record as defined in this file does not declare a
%% game_stage field — confirm the record gains that field (and that
%% analyze_context/2 populates it), otherwise this cannot compile.
calculate_tempo_score(Play, Context) ->
    case Context#play_context.game_stage of
        early_game -> calculate_early_tempo(Play, Context);
        mid_game -> calculate_mid_tempo(Play, Context);
        end_game -> calculate_end_tempo(Play, Context)
    end.
%% Control score of a play: how much table control it asserts, scaled
%% by the context's control factor. Bombs and the rocket assert full
%% control; 2s/jokers assert strong control; everything else is weak.
%% Fix: third parameter renamed to _AIState — it is never used here
%% (compiler warning); arity kept so callers are unaffected.
calculate_control_score(Play, Context, _AIState) ->
    {Type, Value, _} = Play,
    BaseControl = case Type of
        ?CARD_TYPE_BOMB -> 1.0;
        ?CARD_TYPE_ROCKET -> 1.0;
        _ when Value >= ?CARD_2 -> 0.8;
        _ -> 0.5
    end,
    BaseControl * Context#play_context.control_factor.
%% Efficiency: the fraction of the hand this play sheds, boosted as the
%% hand gets shorter after the play.
calculate_efficiency_score({_, _, PlayCards}, Context) ->
    Used = length(PlayCards),
    InHand = Context#play_context.cards_remaining,
    LeftAfter = InHand - Used,
    (Used / max(1, InHand)) * (1 + (20 - LeftAfter) / 20).
%% Scale a play's score by who we are (the landlord plays more
%% aggressively) and by the game stage (later stages weigh higher).
%% Fix: second parameter renamed to _Play — it is never used here
%% (compiler warning); arity kept so callers are unaffected.
%% NOTE(review): reads Context#play_context.game_stage — confirm the
%% #play_context{} record declares that field.
adjust_score_for_context(Score, _Play, Context, AIState) ->
    RoleMultiplier = case AIState#ai_state.role of
        dizhu -> 1.2;
        nongmin -> 1.0
    end,
    StageMultiplier = case Context#play_context.game_stage of
        early_game -> 0.9;
        mid_game -> 1.0;
        end_game -> 1.1
    end,
    Score * RoleMultiplier * StageMultiplier.
%% Remove the played cards from the hand and record them as played.
update_after_play({_, _, PlayedNow}, AIState) ->
    AIState#ai_state{
        hand_cards = AIState#ai_state.hand_cards -- PlayedNow,
        played_cards = AIState#ai_state.played_cards ++ PlayedNow
    }.
%% Helper functions
%% Clamp a probability into the closed interval [0.0, 1.0].
normalize_probability(P) ->
    min(1.0, max(0.0, P)).
%% Number of control cards (face value >= 2, i.e. 2s and jokers) held.
count_control_cards(Cards) ->
    lists:foldl(
        fun({V, _}, N) when V >= ?CARD_2 -> N + 1;
           (_, N) -> N
        end, 0, Cards).
%% Positional bonus: leading a trick is worth more than playing last.
calculate_position_bonus(AIState, GameState) ->
    position_bonus(get_position(AIState, GameState)).
position_bonus(first) -> 1.2;
position_bonus(middle) -> 1.0;
position_bonus(last) -> 0.8.
%% Position of this AI in the current trick.
%% Simplified stub: always 'first'. TODO: derive from GameState.
%% Fix: unused parameters underscored on all three stubs (compiler
%% warnings); arities unchanged.
get_position(_AIState, _GameState) ->
    first.
%% Whether it is this AI's turn.
%% Simplified stub: always true. TODO: derive from GameState.
is_current_player(_AIState, _GameState) ->
    true.
%% Whether there is an outstanding play on the table to beat.
%% Simplified stub: always false. TODO: derive from GameState.
has_active_play(_GameState) ->
    false.
%% Beat the last play only if its estimated value falls below a dynamic
%% threshold that rises with hand strength, control, and a short hand.
should_beat(HandStrength, ControlLevel, CardsLeft, LastPlay) ->
    Factors = [HandStrength / 100, ControlLevel / 100, (20 - CardsLeft) / 20],
    Threshold = 0.6 * lists:sum(Factors) / 3,
    evaluate_play_value(LastPlay) < Threshold.
%% Heuristic value of a play: a per-type base, scaled up by face value
%% (relative to the big joker) and by card count.
evaluate_play_value({Type, Value, Cards}) ->
    Base = maps:get(Type, #{
        ?CARD_TYPE_ROCKET => 1.0,
        ?CARD_TYPE_BOMB => 0.9,
        ?CARD_TYPE_PLANE => 0.7,
        ?CARD_TYPE_STRAIGHT => 0.6,
        ?CARD_TYPE_STRAIGHT_PAIR => 0.5,
        ?CARD_TYPE_THREE_TWO => 0.4,
        ?CARD_TYPE_THREE_ONE => 0.3,
        ?CARD_TYPE_THREE => 0.25,
        ?CARD_TYPE_PAIR => 0.2,
        ?CARD_TYPE_SINGLE => 0.1
    }),
    ValueBonus = Value / ?CARD_JOKER_BIG,
    CountBonus = length(Cards) / 10,
    Base * (1 + ValueBonus) * (1 + CountBonus).
%% Play-type generation helpers
%% Each lone single becomes a one-card play.
generate_single_plays(Components) ->
    [{?CARD_TYPE_SINGLE, V, [Card]}
     || {V, Card} <- maps:get(singles, Components, [])].
%% Each pair becomes a pair play.
generate_pair_plays(Components) ->
    [{?CARD_TYPE_PAIR, V, PairCards}
     || {V, PairCards} <- maps:get(pairs, Components, [])].
%% All triple-based plays: plain triples plus the three-with-one and
%% three-with-two combinations built from them.
%% Bug fix: BasicTriples was computed but dropped, so plain triples
%% were never offered as candidates — they are now included.
generate_triple_plays(Components) ->
    Triples = maps:get(triples, Components, []),
    BasicTriples = [{?CARD_TYPE_THREE, Value, Cards} ||
                    {Value, Cards} <- Triples],
    BasicTriples ++ generate_triple_combinations(Triples, Components).
%% Each detected straight becomes a straight play.
generate_sequence_plays(Components) ->
    lists:map(fun({V, Run}) -> {?CARD_TYPE_STRAIGHT, V, Run} end,
              maps:get(sequences, Components, [])).
%% Each four-of-a-kind becomes a bomb play.
generate_bomb_plays(Components) ->
    lists:map(fun({V, Quad}) -> {?CARD_TYPE_BOMB, V, Quad} end,
              maps:get(bombs, Components, [])).
%% Three-with-one and three-with-two plays built from the given triples
%% and the hand's singles/pairs.
generate_triple_combinations(Triples, Components) ->
    generate_three_one(Triples, maps:get(singles, Components, []))
        ++ generate_three_two(Triples, maps:get(pairs, Components, [])).
%% Every triple combined with every single of a different value.
generate_three_one(Triples, Singles) ->
    lists:flatmap(
        fun({TV, TCards}) ->
            [{?CARD_TYPE_THREE_ONE, TV, TCards ++ [SCard]}
             || {SV, SCard} <- Singles, SV =/= TV]
        end,
        Triples).
%% Every triple combined with every pair of a different value.
generate_three_two(Triples, Pairs) ->
    lists:flatmap(
        fun({TV, TCards}) ->
            [{?CARD_TYPE_THREE_TWO, TV, TCards ++ PCards}
             || {PV, PCards} <- Pairs, PV =/= TV]
        end,
        Triples).
%% Pattern lookup
%% All plays of the given type that beat MinValue.
find_greater_plays(Cards, Type, MinValue) ->
    dispatch_greater(Type, analyze_components(Cards), MinValue).
%% Route to the per-type search; unknown types yield no plays.
dispatch_greater(?CARD_TYPE_SINGLE, Components, MinValue) ->
    find_greater_singles(Components, MinValue);
dispatch_greater(?CARD_TYPE_PAIR, Components, MinValue) ->
    find_greater_pairs(Components, MinValue);
dispatch_greater(?CARD_TYPE_THREE, Components, MinValue) ->
    find_greater_triples(Components, MinValue);
dispatch_greater(?CARD_TYPE_THREE_ONE, Components, MinValue) ->
    find_greater_three_one(Components, MinValue);
dispatch_greater(?CARD_TYPE_THREE_TWO, Components, MinValue) ->
    find_greater_three_two(Components, MinValue);
dispatch_greater(?CARD_TYPE_STRAIGHT, Components, MinValue) ->
    find_greater_straight(Components, MinValue);
dispatch_greater(?CARD_TYPE_STRAIGHT_PAIR, Components, MinValue) ->
    find_greater_straight_pair(Components, MinValue);
dispatch_greater(?CARD_TYPE_PLANE, Components, MinValue) ->
    find_greater_plane(Components, MinValue);
dispatch_greater(?CARD_TYPE_BOMB, Components, MinValue) ->
    find_greater_bomb(Components, MinValue);
dispatch_greater(_, _, _) ->
    [].
%% Singles strictly above MinValue, as single plays.
find_greater_singles(Components, MinValue) ->
    lists:filtermap(
        fun({V, Card}) when V > MinValue ->
                {true, {?CARD_TYPE_SINGLE, V, [Card]}};
           (_) -> false
        end,
        maps:get(singles, Components, [])).
%% Pairs strictly above MinValue, as pair plays.
find_greater_pairs(Components, MinValue) ->
    lists:filtermap(
        fun({V, PairCards}) when V > MinValue ->
                {true, {?CARD_TYPE_PAIR, V, PairCards}};
           (_) -> false
        end,
        maps:get(pairs, Components, [])).
%% Triples strictly above MinValue, as plain triple plays.
find_greater_triples(Components, MinValue) ->
    lists:filtermap(
        fun({V, TripleCards}) when V > MinValue ->
                {true, {?CARD_TYPE_THREE, V, TripleCards}};
           (_) -> false
        end,
        maps:get(triples, Components, [])).
%% Three-with-one plays whose triple beats MinValue.
find_greater_three_one(Components, MinValue) ->
    Bigger = lists:filter(fun({V, _}) -> V > MinValue end,
                          maps:get(triples, Components, [])),
    generate_three_one(Bigger, maps:get(singles, Components, [])).
%% Three-with-two plays whose triple beats MinValue.
find_greater_three_two(Components, MinValue) ->
    Bigger = lists:filter(fun({V, _}) -> V > MinValue end,
                          maps:get(triples, Components, [])),
    generate_three_two(Bigger, maps:get(pairs, Components, [])).
%% Straights that start above MinValue.
find_greater_straight(Components, MinValue) ->
    [{?CARD_TYPE_STRAIGHT, V, Run}
     || {V, Run} <- maps:get(sequences, Components, []), V > MinValue].
%% Consecutive-pair runs that start above MinValue.
find_greater_straight_pair(Components, MinValue) ->
    [{?CARD_TYPE_STRAIGHT_PAIR, V, Run}
     || {V, Run} <- find_pair_sequences(Components), V > MinValue].
%% Planes (consecutive triples) that start above MinValue.
find_greater_plane(Components, MinValue) ->
    [{?CARD_TYPE_PLANE, V, Run}
     || {V, Run} <- find_planes(Components), V > MinValue].
%% Bombs that beat MinValue.
find_greater_bomb(Components, MinValue) ->
    [{?CARD_TYPE_BOMB, V, Quad}
     || {V, Quad} <- maps:get(bombs, Components, []), V > MinValue].
%% All bombs present in the hand, as playable tuples.
%% Fix: the comprehension's generator variable previously shadowed the
%% function parameter `Cards` (compiler warning, and confusing) —
%% renamed both for clarity. Behavior unchanged.
find_bomb_plays(HandCards) ->
    Components = analyze_components(HandCards),
    [{?CARD_TYPE_BOMB, Value, BombCards} ||
        {Value, BombCards} <- maps:get(bombs, Components, [])].
%% The rocket (both jokers) as a single-element play list, if held.
find_rocket_play(Cards) ->
    case find_rocket(analyze_components(Cards)) of
        {ok, Rocket} -> [Rocket];
        _ -> []
    end.
%% A rocket exists only when both the small and the big joker are held.
find_rocket(Components) ->
    SmallJoker = find_card(?CARD_JOKER_SMALL, Components),
    BigJoker = find_card(?CARD_JOKER_BIG, Components),
    case {SmallJoker, BigJoker} of
        {{ok, Small}, {ok, Big}} ->
            {ok, {?CARD_TYPE_ROCKET, ?CARD_JOKER_BIG, [Small, Big]}};
        _ ->
            false
    end.
%% Look up one card by face value among the singles component.
%% Returns {ok, Card} or false.
find_card(Value, Components) ->
    case lists:keyfind(Value, 1, maps:get(singles, Components, [])) of
        {Value, Card} -> {ok, Card};
        false -> false
    end.
%% Runs of consecutive pairs, value-sorted before scanning.
find_pair_sequences(Components) ->
    Sorted = lists:sort(maps:get(pairs, Components, [])),
    find_consecutive_pairs(Sorted, []).
%% Runs of consecutive triples (plane bodies), value-sorted before scanning.
find_planes(Components) ->
    Sorted = lists:sort(maps:get(triples, Components, [])),
    find_consecutive_triples(Sorted, []).
%% Scan value-sorted pairs for runs of consecutive values. Each hit is
%% {StartValue, FlattenedCards}.
%% Bug fix: the threshold was `>= 3` on the flattened card list, which
%% accepted runs of only 2 consecutive pairs (4 cards); the minimum
%% legal Dou Dizhu pair straight is 3 consecutive pairs, i.e. 6 cards.
find_consecutive_pairs([], Acc) -> lists:reverse(Acc);
find_consecutive_pairs([{V1, Cards1} | Rest], Acc) ->
    case find_consecutive_pair_sequence(V1, Cards1, Rest) of
        {Sequence, NewRest} when length(Sequence) >= 6 ->
            find_consecutive_pairs(NewRest, [{V1, Sequence} | Acc]);
        _ ->
            find_consecutive_pairs(Rest, Acc)
    end.
%% Collect the cards of pairs with strictly consecutive values,
%% starting at Value; returns {FlattenedCards, RemainingPairs}.
%% (Also underscored the unused Value in the empty-list clause to
%% silence a compiler warning.)
find_consecutive_pair_sequence(Value, Cards, Rest) ->
    find_consecutive_pair_sequence(Value, Cards, Rest, [Cards]).
find_consecutive_pair_sequence(_Value, _, [], Acc) ->
    {lists:flatten(lists:reverse(Acc)), []};
find_consecutive_pair_sequence(Value, _, [{NextValue, NextCards} | Rest], Acc)
  when NextValue =:= Value + 1 ->
    find_consecutive_pair_sequence(NextValue, NextCards, Rest, [NextCards | Acc]);
find_consecutive_pair_sequence(_, _, Rest, Acc) ->
    {lists:flatten(lists:reverse(Acc)), Rest}.
%% Scan value-sorted triples for runs of consecutive values. A plane
%% needs at least 2 consecutive triples, i.e. 6 cards — the existing
%% `>= 6` threshold on the flattened card list is correct.
find_consecutive_triples([], Acc) -> lists:reverse(Acc);
find_consecutive_triples([{V1, Cards1} | Rest], Acc) ->
    case find_consecutive_triple_sequence(V1, Cards1, Rest) of
        {Sequence, NewRest} when length(Sequence) >= 6 ->
            find_consecutive_triples(NewRest, [{V1, Sequence} | Acc]);
        _ ->
            find_consecutive_triples(Rest, Acc)
    end.
%% Collect the cards of triples with strictly consecutive values,
%% starting at Value; returns {FlattenedCards, RemainingTriples}.
%% Fix: underscored the unused Value in the empty-list clause to
%% silence a compiler warning.
find_consecutive_triple_sequence(Value, Cards, Rest) ->
    find_consecutive_triple_sequence(Value, Cards, Rest, [Cards]).
find_consecutive_triple_sequence(_Value, _, [], Acc) ->
    {lists:flatten(lists:reverse(Acc)), []};
find_consecutive_triple_sequence(Value, _, [{NextValue, NextCards} | Rest], Acc)
  when NextValue =:= Value + 1 ->
    find_consecutive_triple_sequence(NextValue, NextCards, Rest, [NextCards | Acc]);
find_consecutive_triple_sequence(_, _, Rest, Acc) ->
    {lists:flatten(lists:reverse(Acc)), Rest}.
%% Tempo scores for the early, middle and late phases of the game.
%% Fix: the Value element was unused in the mid- and end-game clauses
%% (compiler warnings) — underscored; arities and results unchanged.
%% Early game: prefer shedding small singles/pairs and long runs.
calculate_early_tempo({Type, Value, _}, _Context) ->
    case Type of
        ?CARD_TYPE_SINGLE when Value < ?CARD_2 -> 0.8;
        ?CARD_TYPE_PAIR when Value < ?CARD_2 -> 0.7;
        ?CARD_TYPE_STRAIGHT -> 0.9;
        ?CARD_TYPE_STRAIGHT_PAIR -> 0.85;
        _ -> 0.5
    end.
%% Mid game: favour triple combinations and planes; bombs score lower
%% so they tend to be kept back.
calculate_mid_tempo({Type, _Value, _}, _Context) ->
    case Type of
        ?CARD_TYPE_THREE_ONE -> 0.8;
        ?CARD_TYPE_THREE_TWO -> 0.85;
        ?CARD_TYPE_PLANE -> 0.9;
        ?CARD_TYPE_BOMB -> 0.7;
        _ -> 0.6
    end.
%% End game: big plays and anything that helps empty a short hand
%% score high.
calculate_end_tempo({Type, _Value, _}, Context) ->
    CardsLeft = Context#play_context.cards_remaining,
    case Type of
        ?CARD_TYPE_BOMB -> 0.9;
        ?CARD_TYPE_ROCKET -> 1.0;
        _ when CardsLeft =< 4 -> 0.95;
        _ -> 0.7
    end.
%% Filter candidate plays according to the game-stage policy.
%% NOTE(review): this reads Context#play_context.game_stage, but the
%% #play_context{} record as defined in this file does not declare a
%% game_stage field — confirm the record gains that field (and that
%% analyze_context/2 populates it), otherwise this cannot compile.
filter_candidates(Plays, Context) ->
    case Context#play_context.game_stage of
        early_game ->
            filter_early_game_plays(Plays, Context);
        mid_game ->
            filter_mid_game_plays(Plays, Context);
        end_game ->
            filter_end_game_plays(Plays, Context)
    end.
%% Early game: keep every non-bomb candidate; bombs are only
%% considered when their face value is at least a 2.
filter_early_game_plays(Plays, _Context) ->
    lists:filter(
        fun({Type, Value, _}) ->
            Type =/= ?CARD_TYPE_BOMB orelse Value >= ?CARD_2
        end,
        Plays).
%% Mid game: while control is slipping (control_factor < 0.5) anything
%% goes; while we still have control, hold bombs back.
filter_mid_game_plays(Plays, #play_context{control_factor = Control})
  when Control < 0.5 ->
    Plays;
filter_mid_game_plays(Plays, _Context) ->
    lists:filter(fun({Type, _, _}) -> Type =/= ?CARD_TYPE_BOMB end, Plays).
%% End game: no restrictions on play type.
filter_end_game_plays(Plays, _Context) ->
    Plays.
%% Group a hand into a map of face value => cards with that value.
%% Within each group, cards end up in reverse encounter order.
group_cards_by_value(Cards) ->
    lists:foldl(
        fun({Value, _} = Card, Groups) ->
            Existing = maps:get(Value, Groups, []),
            Groups#{Value => [Card | Existing]}
        end,
        #{},
        Cards).