Browse Source

ft: modifications and adjustments

master
SisMaker 3 months ago
parent
commit
dd32ac00da
12 changed files with 415 additions and 1021 deletions
  1. README.md  +0 -1
  2. src/ai_player.erl  +261 -6
  3. src/ai_strategy.erl  +6 -1
  4. src/doudizhu_ai.erl  +30 -210
  5. src/doudizhu_ai_sup.erl  +0 -44
  6. src/game_manager.erl  +33 -2
  7. src/ml_engine.erl  +49 -631
  8. src/performance_optimization.erl  +19 -1
  9. src/score_system.erl  +17 -12
  10. src/strategy_optimizer.erl  +0 -53
  11. src/visualization.erl  +0 -58
  12. 斗地主.md  +0 -2

+ 0  - 1   README.md   View File

@@ -33,7 +33,6 @@ Build
3. **System support modules**
- parallel_compute.erl: parallel computation
- performance_monitor.erl: performance monitoring
- visualization.erl: visualization and analysis
## Features

+ 261  - 6   src/ai_player.erl   View File

@@ -67,6 +67,14 @@ handle_cast(_Msg, State) ->
handle_info(_Info, State) ->
{noreply, State}.
%
terminate(_Reason, _State) ->
ok.
%
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
%%
% AI player name
@@ -195,10 +203,257 @@ find_minimum_bigger_combination(Cards, LastPlay) ->
_ -> Bigger
end.
%
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
%
find_rocket(Cards) ->
% A rocket is both jokers together
HasSmallJoker = lists:any(fun(Card) -> Card =:= {"", "小王"} end, Cards),
HasBigJoker = lists:any(fun(Card) -> Card =:= {"", "大王"} end, Cards),
case HasSmallJoker andalso HasBigJoker of
true -> {ok, [{"", "小王"}, {"", "大王"}]};
false -> error
end.
%
terminate(_Reason, _State) ->
ok.
%
find_bomb(Cards) ->
% Count cards by rank
ValueGroups = lists:foldl(
fun({_, Number}, Acc) ->
Count = maps:get(Number, Acc, 0),
Acc#{Number => Count + 1}
end,
#{},
Cards
),
% Ranks with at least 4 cards form a bomb
BombValues = [Value || {Value, Count} <- maps:to_list(ValueGroups), Count >= 4],
case BombValues of
[] -> error;
[Value|_] ->
% Take the 4 bomb cards
BombCards = lists:filter(fun({_, V}) -> V =:= Value end, Cards),
{ok, lists:sublist(BombCards, 4)}
end.
%
find_best_normal_combination(Cards) ->
% Try combination types from strongest to simplest:
% straight, three with pair, three, pair, single
case find_straight(Cards) of
{ok, Straight} -> Straight;
_ ->
case find_three_with_pair(Cards) of
{ok, ThreeWithPair} -> ThreeWithPair;
_ ->
case find_three(Cards) of
{ok, Three} -> Three;
_ ->
case find_pair(Cards) of
{ok, Pair} -> Pair;
_ ->
case find_single(Cards) of
{ok, Single} -> Single;
_ -> []
end
end
end
end
end.
%
find_bigger_bomb(Cards, LastPlay) ->
% Rank of the bomb we must beat
[{_, LastValue}|_] = LastPlay,
% find_bomb/1 can fail, so match both results instead of
% asserting {ok, _}, which crashes when the hand has no bomb
case find_bomb(Cards) of
    {ok, MyBomb} ->
        [{_, MyValue}|_] = MyBomb,
        case card_value(MyValue) > card_value(LastValue) of
            true -> MyBomb;
            false -> []
        end;
    error -> []
end.
%
find_bigger_normal_combination(Cards, LastPlay, Type) ->
% Dispatch on the combination type we have to beat;
% unsupported types fall through to pass ([])
case Type of
single -> find_bigger_single(Cards, LastPlay);
pair -> find_bigger_pair(Cards, LastPlay);
triple -> find_bigger_triple(Cards, LastPlay);
straight -> find_bigger_straight(Cards, LastPlay);
_ -> []
end.
%
find_bigger_pair(Cards, LastPlay) ->
[{_, LastValue}|_] = LastPlay,
LastValueNum = card_value(LastValue),
% Count cards by rank
ValueGroups = lists:foldl(
fun({_, Number}, Acc) ->
Count = maps:get(Number, Acc, 0),
Acc#{Number => Count + 1}
end,
#{},
Cards
),
% Ranks above LastValue with at least 2 cards
BiggerPairValues = [Value || {Value, Count} <- maps:to_list(ValueGroups),
Count >= 2, card_value(Value) > LastValueNum],
case BiggerPairValues of
[] -> [];
_ ->
% Play the smallest pair above LastValue
SortedValues = lists:sort(
fun(V1, V2) -> card_value(V1) < card_value(V2) end,
BiggerPairValues
),
SmallestValue = hd(SortedValues),
PairCards = lists:filter(fun({_, V}) -> V =:= SmallestValue end, Cards),
lists:sublist(PairCards, 2)
end.
%
find_bigger_triple(Cards, LastPlay) ->
[{_, LastValue}|_] = LastPlay,
LastValueNum = card_value(LastValue),
% Count cards by rank
ValueGroups = lists:foldl(
fun({_, Number}, Acc) ->
Count = maps:get(Number, Acc, 0),
Acc#{Number => Count + 1}
end,
#{},
Cards
),
% Ranks above LastValue with at least 3 cards
BiggerTripleValues = [Value || {Value, Count} <- maps:to_list(ValueGroups),
Count >= 3, card_value(Value) > LastValueNum],
case BiggerTripleValues of
[] -> [];
_ ->
% Play the smallest triple above LastValue
SortedValues = lists:sort(
fun(V1, V2) -> card_value(V1) < card_value(V2) end,
BiggerTripleValues
),
SmallestValue = hd(SortedValues),
TripleCards = lists:filter(fun({_, V}) -> V =:= SmallestValue end, Cards),
lists:sublist(TripleCards, 3)
end.
%
find_bigger_straight(_Cards, _LastPlay) ->
% Straight following is not implemented yet; pass for now
% (see the straight sketch after this file's diff)
[].
%
find_single(Cards) ->
case Cards of
[] -> error;
[Card|_] -> {ok, [Card]}
end.
%
find_pair(Cards) ->
% Count cards by rank
ValueGroups = lists:foldl(
fun({_, Number}, Acc) ->
Count = maps:get(Number, Acc, 0),
Acc#{Number => Count + 1}
end,
#{},
Cards
),
% Ranks with at least 2 cards
PairValues = [Value || {Value, Count} <- maps:to_list(ValueGroups), Count >= 2],
case PairValues of
[] -> error;
[Value|_] ->
% Take 2 of them
PairCards = lists:filter(fun({_, V}) -> V =:= Value end, Cards),
{ok, lists:sublist(PairCards, 2)}
end.
%
find_three(Cards) ->
% Count cards by rank
ValueGroups = lists:foldl(
fun({_, Number}, Acc) ->
Count = maps:get(Number, Acc, 0),
Acc#{Number => Count + 1}
end,
#{},
Cards
),
% Ranks with at least 3 cards
ThreeValues = [Value || {Value, Count} <- maps:to_list(ValueGroups), Count >= 3],
case ThreeValues of
[] -> error;
[Value|_] ->
% Take 3 of them
ThreeCards = lists:filter(fun({_, V}) -> V =:= Value end, Cards),
{ok, lists:sublist(ThreeCards, 3)}
end.
%
find_three_with_pair(Cards) ->
% Three of a kind plus a pair
case find_three(Cards) of
{ok, Three} ->
RemainingCards = Cards -- Three,
case find_pair(RemainingCards) of
{ok, Pair} -> {ok, Three ++ Pair};
_ -> error
end;
_ -> error
end.
%
find_straight(_Cards) ->
% Straight detection is not implemented yet; a hedged sketch
% follows this file's diff
error.
% Card rank order: 3..10 < J < Q < K < A < 2 < 小王 < 大王
card_value("大王") -> 17;
card_value("小王") -> 16;
card_value("2") -> 15;
card_value("A") -> 14;
card_value("K") -> 13;
card_value("Q") -> 12;
card_value("J") -> 11;
card_value(Number) when is_list(Number) ->
try
list_to_integer(Number)
catch
error:_ -> 0
end.
%
find_bigger_single(Cards, LastPlay) ->
[{_, LastValue}|_] = LastPlay,
LastValueNum = card_value(LastValue),
% Singles that beat LastValue
BiggerCards = lists:filter(
fun({_, Value}) -> card_value(Value) > LastValueNum end,
Cards
),
case BiggerCards of
[] -> [];
_ ->
% Play the smallest card that beats LastValue
SortedBiggerCards = lists:sort(
fun({_, V1}, {_, V2}) -> card_value(V1) < card_value(V2) end,
BiggerCards
),
[hd(SortedBiggerCards)]
end.
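The straight helpers above are stubs. A minimal sketch of how find_straight/1 could detect the lowest five-card run, assuming the {Suit, Number} card tuples and the card_value/1 ordering from this file (scan_run/2 and pick_card_by_value/2 are hypothetical helpers, not part of this commit):

%% Sketch only: 2s and jokers (card_value >= 15) can never sit in a straight.
find_straight(Cards) ->
    Values = lists:usort([card_value(N) || {_, N} <- Cards, card_value(N) < 15]),
    case scan_run(Values, []) of
        {ok, Run} -> {ok, [pick_card_by_value(V, Cards) || V <- Run]};
        error -> error
    end.

%% Extend the current run while the sorted values stay consecutive.
scan_run([V | Rest], [Prev | _] = Acc) when V =:= Prev + 1 ->
    case length(Acc) of
        4 -> {ok, lists:reverse([V | Acc])}; % fifth consecutive value found
        _ -> scan_run(Rest, [V | Acc])
    end;
scan_run([V | Rest], _Acc) ->
    scan_run(Rest, [V]); % run broken, restart from V
scan_run([], _Acc) ->
    error.

%% Pick one concrete card of the given rank.
pick_card_by_value(V, Cards) ->
    hd([C || {_, N} = C <- Cards, card_value(N) =:= V]).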

+ 6  - 1   src/ai_strategy.erl   View File

@@ -164,7 +164,12 @@ evaluate_control_impact(Play, GameState) ->
1 -> 0.3; % single
2 -> 0.5; % pair
3 -> 0.7; % three of a kind
4 when Play == [Play|_] -> 1.0; %
4 ->
    % Four cards: count as a bomb only when all four compare equal
    case lists:all(fun(Card) -> Card =:= hd(Play) end, Play) of
        true -> 1.0; % bomb
        false -> 0.8 % e.g. three with one
    end;
_ -> 0.6 % longer combinations
end.
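If plays here reuse the {Suit, Number} card tuples from ai_player.erl, four distinct cards never compare equal as whole terms, so the all-equal branch above would always fall through to 0.8; comparing only the rank element avoids that. A hedged sketch under that assumption (is_bomb/1 is a hypothetical helper):

%% Bomb test on the rank element only.
is_bomb([{_, Rank} | _] = Play) when length(Play) =:= 4 ->
    lists:all(fun({_, R}) -> R =:= Rank end, Play);
is_bomb(_) ->
    false.

%% In a shell: four 5s of different suits are a bomb, mixed ranks are not.
%% is_bomb([{"♠","5"}, {"♥","5"}, {"♦","5"}, {"♣","5"}]) -> true
%% is_bomb([{"♠","5"}, {"♥","5"}, {"♦","5"}, {"♣","6"}]) -> false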

+ 30  - 210   src/doudizhu_ai.erl   View File

@@ -19,17 +19,6 @@
-include("card_types.hrl").
-record(state, {
player_id, % AI player ID
role, % dizhu | nongmin (landlord | farmer)
known_cards = [], % cards known to this player
hand_cards = [], % current hand
played_cards = [], % cards played so far
other_players = [], % other players' info
game_history = [], % history of plays
strategy_cache = #{} % cached strategy results
}).
%% API
start_link(PlayerId) ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [PlayerId], []).
@@ -59,7 +48,7 @@ handle_call(_Request, _From, State) ->
{reply, ok, State}.
handle_cast({update_game_state, Event, Data}, State) ->
NewState = update_state(Event, Data, State),
NewState = process_game_event(Event, Data, State),
{noreply, NewState};
handle_cast(_Msg, State) ->
@@ -76,202 +65,33 @@ code_change(_OldVsn, State, _Extra) ->
%% Internal functions
% Compute the best move for the current game state
calculate_best_move(GameState, Options, State) ->
% 1. Analyze the situation
Situation = analyze_situation(GameState, State),
% 2. Generate candidate moves
PossibleMoves = generate_possible_moves(GameState, Options, State),
% 3. Score each candidate
ScoredMoves = evaluate_moves(PossibleMoves, Situation, State),
% 4. Pick the highest-scoring move
{BestMove, Score} = select_best_move(ScoredMoves),
% 5. Update the strategy state
NewState = update_strategy_state(BestMove, Score, State),
{BestMove, NewState}.
analyze_situation(GameState, State) ->
#situation{
game_stage = determine_game_stage(GameState),
hand_strength = evaluate_hand_strength(State#state.hand_cards),
control_level = calculate_control_level(GameState, State),
winning_probability = estimate_winning_probability(GameState, State)
}.
%% Determine the game stage from the cards remaining
determine_game_stage(GameState) ->
CardsLeft = count_remaining_cards(GameState),
% if, not Elixir's cond do, is the Erlang conditional
if
    CardsLeft > 15 -> early_game;
    CardsLeft > 8 -> mid_game;
    true -> end_game
end.
%% Evaluate hand strength
evaluate_hand_strength(Cards) ->
% Break the hand into components
Components = analyze_card_components(Cards),
% Base card score
BaseScore = calculate_base_score(Components),
% Combination score
ComboScore = calculate_combo_value(Components),
% Control score
ControlScore = calculate_control_value(Components),
% Aggregate into the hand_strength record
#hand_strength{
base_score = BaseScore,
combo_score = ComboScore,
control_score = ControlScore,
total_score = BaseScore + ComboScore + ControlScore
}.
%% Break a hand into its components
analyze_card_components(Cards) ->
% Group cards by rank
Groups = group_cards_by_value(Cards),
% Extract the basic patterns
Singles = find_singles(Groups),
Pairs = find_pairs(Groups),
Triples = find_triples(Groups),
Bombs = find_bombs(Groups),
Sequences = find_sequences(Groups),
% Assemble the components record
#components{
singles = Singles,
pairs = Pairs,
triples = Triples,
bombs = Bombs,
sequences = Sequences
}.
%% Generate candidate moves
generate_possible_moves(GameState, Options, State) ->
LastPlay = get_last_play(GameState),
HandCards = State#state.hand_cards,
case LastPlay of
none ->
generate_leading_moves(HandCards);
{Type, Value, _Cards} ->
generate_following_moves(HandCards, Type, Value)
end.
%% Score every candidate move
evaluate_moves(Moves, Situation, State) ->
lists:map(fun(Move) ->
Score = calculate_move_score(Move, Situation, State),
{Move, Score}
end, Moves).
%% Weighted score for one move
calculate_move_score(Move, Situation, State) ->
BaseScore = calculate_base_move_score(Move),
PositionScore = calculate_position_score(Move, Situation),
StrategyScore = calculate_strategy_score(Move, Situation, State),
RiskScore = calculate_risk_score(Move, Situation, State),
BaseScore * 0.4 +
PositionScore * 0.2 +
StrategyScore * 0.3 +
RiskScore * 0.1.
%% Pick the highest-scoring move (default: pass)
select_best_move(ScoredMoves) ->
lists:foldl(fun
({Move, Score}, {BestMove, BestScore}) when Score > BestScore ->
{Move, Score};
(_, Current) ->
Current
end, {pass, 0}, ScoredMoves).
%% Remember the chosen move in the strategy cache
update_strategy_state(Move, Score, State) ->
NewCache = update_strategy_cache(Move, Score, State#state.strategy_cache),
State#state{strategy_cache = NewCache}.
%% Generate leading moves (no previous play to beat)
generate_leading_moves(Cards) ->
% Break the hand into components
Components = analyze_card_components(Cards),
% Candidate moves per combination type
Singles = generate_single_moves(Components),
Pairs = generate_pair_moves(Components),
Triples = generate_triple_moves(Components),
Sequences = generate_sequence_moves(Components),
Bombs = generate_bomb_moves(Components),
% Combine all candidates
Singles ++ Pairs ++ Triples ++ Sequences ++ Bombs.
%% Generate moves that answer the previous play
generate_following_moves(Cards, Type, MinValue) ->
% Same-type moves that beat MinValue
ValidMoves = find_valid_moves(Cards, Type, MinValue),
% Plus bombs and rockets
SpecialMoves = find_special_moves(Cards),
ValidMoves ++ SpecialMoves.
%% Estimate the winning probability
estimate_winning_probability(GameState, State) ->
% Combine hand strength, position, card tracking and control
HandStrength = evaluate_hand_strength(State#state.hand_cards),
Position = evaluate_position(GameState),
RemainingCards = analyze_remaining_cards(GameState, State),
ControlFactor = calculate_control_factor(GameState, State),
BaseProb = calculate_base_probability(HandStrength, Position),
AdjustedProb = adjust_probability(BaseProb, RemainingCards, ControlFactor),
clamp(AdjustedProb, 0.0, 1.0).
%% Control factor: control cards we hold vs cards left
calculate_control_factor(GameState, State) ->
ControlCards = count_control_cards(State#state.hand_cards),
TotalCards = count_total_cards(GameState),
RemainingCards = count_remaining_cards(GameState),
ControlRatio = ControlCards / max(1, RemainingCards),
PositionBonus = calculate_position_bonus(GameState, State),
ControlRatio * PositionBonus.
%% Analyze which cards are still unseen
analyze_remaining_cards(GameState, State) ->
PlayedCards = get_played_cards(GameState),
KnownCards = State#state.known_cards,
AllCards = generate_full_deck(),
RemainingCards = AllCards -- (PlayedCards ++ KnownCards),
analyze_card_distribution(RemainingCards).
%% Update internal state on game events
update_state(Event, Data, State) ->
case Event of
play_cards ->
update_after_play(Data, State);
receive_cards ->
update_after_receive(Data, State);
game_over ->
update_after_game_over(Data, State);
_ ->
State
end.
%% Clamp a value into [Min, Max]
clamp(Value, Min, Max) ->
min(Max, max(Min, Value)).
%% Base probability from hand strength and seat position
calculate_base_probability(HandStrength, Position) ->
BaseProb = HandStrength#hand_strength.total_score / 100,
PositionMod = case Position of
first -> 1.2;
middle -> 1.0;
last -> 0.8
end,
BaseProb * PositionMod.
%% Adjust the probability for card tracking and control
adjust_probability(BaseProb, RemainingCards, ControlFactor) ->
RemainingMod = calculate_remaining_modifier(RemainingCards),
ControlMod = calculate_control_modifier(ControlFactor),
BaseProb * RemainingMod * ControlMod.
% Use the strategy module to choose and execute a move
Strategy = doudizhu_ai_strategy:choose_strategy(GameState, State),
Move = doudizhu_ai_strategy:execute_strategy(Strategy, GameState, State),
{Move, State#state{strategy_cache = #{current_strategy => Strategy}}}.
%
perform_cards_analysis(Cards, State) ->
% Evaluate the hand through the strategy module
HandValue = doudizhu_ai_strategy:analyze_hand_value(Cards),
{hand_value, HandValue}.
%
process_game_event(card_played, {Player, Cards}, State) ->
% Record the played cards
NewPlayedCards = [{Player, Cards} | State#state.played_cards],
State#state{played_cards = NewPlayedCards};
process_game_event(new_hand, Cards, State) ->
% Receive a new hand
State#state{hand_cards = Cards};
process_game_event(role_assigned, Role, State) ->
% Store the assigned role
State#state{role = Role};
process_game_event(_, _, State) ->
% Ignore other events
State.
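The added process_game_event/3 clauses are driven by the {update_game_state, Event, Data} cast handled earlier in this file. A hedged usage sketch (the hand contents and the opponent pid are illustrative):

%% Each cast runs exactly one process_game_event/3 clause.
gen_server:cast(doudizhu_ai, {update_game_state, role_assigned, dizhu}),
gen_server:cast(doudizhu_ai, {update_game_state, new_hand,
                              [{"♠","3"}, {"♥","7"}, {"", "大王"}]}),
gen_server:cast(doudizhu_ai, {update_game_state, card_played,
                              {OpponentPid, [{"♦","9"}]}}).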

+ 0  - 44   src/doudizhu_ai_sup.erl   View File

@@ -1,44 +0,0 @@
-module(doudizhu_ai_sup).
-behaviour(supervisor).
-export([start_link/0]).
-export([init/1]).
start_link() ->
supervisor:start_link({local, ?MODULE}, ?MODULE, []).
init([]) ->
SupFlags = #{
strategy => one_for_one,
intensity => 10,
period => 60
},
Children = [
#{
id => ml_engine,
start => {ml_engine, start_link, []},
restart => permanent,
shutdown => 5000,
type => worker,
modules => [ml_engine]
},
#{
id => training_system,
start => {training_system, start_link, []},
restart => permanent,
shutdown => 5000,
type => worker,
modules => [training_system]
},
#{
id => visualization,
start => {visualization, start_link, []},
restart => permanent,
shutdown => 5000,
type => worker,
modules => [visualization]
}
],
{ok, {SupFlags, Children}}.

+ 33  - 2   src/game_manager.erl   View File

@@ -1,6 +1,8 @@
-module(game_manager).
-export([start_game/3, handle_play/2, end_game/1]).
-include("../include/game_records.hrl").
-record(game_manager_state, {
game_id,
players,
@@ -120,11 +122,40 @@ update_ai_players(AIPlayers, Play) ->
calculate_final_scores(GameManagerState) ->
% Determine the winner and per-player scores
% and return them as a map
Winner = get_winner(GameManagerState),
Scores = get_player_scores(GameManagerState),
#{
winner => get_winner(GameManagerState),
scores => get_player_scores(GameManagerState)
winner => Winner,
scores => Scores
}.
%% Find the winner
get_winner(GameManagerState) ->
% The winner is the player with an empty hand; hands are lists
% of cards, so match the empty list
GameState = GameManagerState#game_manager_state.current_state,
Players = GameState#game_state.players,
case lists:keyfind([], 2, Players) of
    {Pid, _, _} -> Pid;
    false -> none
end.
%% Score each player
get_player_scores(GameManagerState) ->
% Role-based base score minus a penalty per card left in hand
GameState = GameManagerState#game_manager_state.current_state,
Players = GameState#game_state.players,
lists:foldl(
fun({Pid, Cards, Role}, Acc) ->
Score = case Role of
landlord -> 100 - length(Cards) * 10;
farmer -> 50 - length(Cards) * 5
end,
Acc#{Pid => Score}
end,
#{},
Players
).
%% Update player statistics
update_player_stats(GameManagerState, FinalScores) ->
%
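A hedged worked example of the scoring rule in get_player_scores/1 above (pids, roles and hands are illustrative; cards are elided as atoms):

%% p1, landlord, 0 cards left: 100 - 0*10 = 100
%% p2, farmer,   2 cards left:  50 - 2*5  = 40
%% p3, farmer,   4 cards left:  50 - 4*5  = 30
Players = [{p1, [], landlord},
           {p2, [c1, c2], farmer},
           {p3, [c1, c2, c3, c4], farmer}].
%% The fold then returns #{p1 => 100, p2 => 40, p3 => 30}.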

+ 49  - 631   src/ml_engine.erl   View File

@@ -1,125 +1,61 @@
-module(ml_engine).
-behaviour(gen_server).
%% API exports
-export([
start_link/0,
train/2,
predict/2,
update_model/2,
get_model_state/0,
save_model/1,
load_model/1,
add_training_sample/1
]).
%% gen_server callbacks
-export([
init/1,
handle_call/3,
handle_cast/2,
handle_info/2,
terminate/2,
code_change/3
]).
-include("card_types.hrl").
-export([start_link/0, init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]).
-export([train/2, predict/2, update_model/2]).
-record(state, {
model, % current model
model_version = 1, % model version
training_data = [], % accumulated training samples
hyperparameters = #{}, % hyperparameters
feature_config = #{}, % feature configuration
last_update, % last update timestamp
performance_metrics = [] % metric history
model_type = basic,
model_data = #{},
training_history = [],
performance_metrics = #{}
}).
-record(model, {
weights = #{}, % layer weights
biases = #{}, % layer biases
layers = [], % layer configuration
activation_functions = #{}, % activation functions
normalization_params = #{}, % normalization parameters
feature_importance = #{}, % feature importance
last_train_error = 0.0 % last training error
}).
%% API
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
train(Data, Options) ->
gen_server:call(?MODULE, {train, Data, Options}, infinity).
gen_server:start_link(?MODULE, [], []).
predict(Features, Options) ->
gen_server:call(?MODULE, {predict, Features, Options}).
train(Pid, TrainingData) ->
gen_server:call(Pid, {train, TrainingData}).
update_model(NewModel, Options) ->
gen_server:cast(?MODULE, {update_model, NewModel, Options}).
predict(Pid, InputData) ->
gen_server:call(Pid, {predict, InputData}).
get_model_state() ->
gen_server:call(?MODULE, get_model_state).
update_model(Pid, ModelData) ->
gen_server:call(Pid, {update_model, ModelData}).
save_model(Filename) ->
gen_server:call(?MODULE, {save_model, Filename}).
load_model(Filename) ->
gen_server:call(?MODULE, {load_model, Filename}).
add_training_sample(Sample) ->
gen_server:cast(?MODULE, {add_training_sample, Sample}).
%% Callback
%% Callbacks
init([]) ->
{ok, #state{
model = initialize_model(),
last_update = os:timestamp(),
hyperparameters = default_hyperparameters(),
feature_config = default_feature_config()
}}.
handle_call({train, Data, Options}, _From, State) ->
{Result, NewState} = do_train(Data, Options, State),
{reply, Result, NewState};
handle_call({predict, Features, Options}, _From, State) ->
{Prediction, NewState} = do_predict(Features, Options, State),
{reply, Prediction, NewState};
handle_call(get_model_state, _From, State) ->
{reply, get_current_model_state(State), State};
{ok, #state{}}.
handle_call({train, TrainingData}, _From, State) ->
% Naive training step: merge the new data into the model
NewModelData = train_model(State#state.model_data, TrainingData),
NewHistory = [TrainingData | State#state.training_history],
NewState = State#state{
model_data = NewModelData,
training_history = NewHistory
},
{reply, {ok, compute_metrics(NewState)}, NewState};
handle_call({save_model, Filename}, _From, State) ->
Result = do_save_model(State, Filename),
{reply, Result, State};
handle_call({predict, InputData}, _From, State) ->
Result = predict_with_model(State#state.model_data, InputData),
{reply, {ok, Result}, State};
handle_call({load_model, Filename}, _From, State) ->
case do_load_model(Filename) of
{ok, NewState} -> {reply, ok, NewState};
Error -> {reply, Error, State}
end;
handle_call({update_model, ModelData}, _From, State) ->
NewState = State#state{model_data = ModelData},
{reply, ok, NewState};
handle_call(_Request, _From, State) ->
{reply, {error, unknown_call}, State}.
handle_cast({update_model, NewModel, Options}, State) ->
{noreply, do_update_model(State, NewModel, Options)};
handle_cast({add_training_sample, Sample}, State) ->
{noreply, do_add_training_sample(State, Sample)};
handle_cast(_Msg, State) ->
{noreply, State}.
handle_info(update_metrics, State) ->
{noreply, update_performance_metrics(State)};
handle_info(_Info, State) ->
{noreply, State}.
terminate(_Reason, State) ->
save_final_state(State),
terminate(_Reason, _State) ->
ok.
code_change(_OldVsn, State, _Extra) ->
@@ -127,537 +63,19 @@ code_change(_OldVsn, State, _Extra) ->
%% Internal functions
initialize_model() ->
#model{
weights = initialize_weights(),
biases = initialize_biases(),
layers = default_layer_configuration(),
activation_functions = default_activation_functions(),
normalization_params = initialize_normalization_params()
}.
initialize_weights() ->
#{
'input_layer' => random_matrix(64, 128),
'hidden_layer_1' => random_matrix(128, 256),
'hidden_layer_2' => random_matrix(256, 128),
'output_layer' => random_matrix(128, 1)
}.
initialize_biases() ->
#{
'input_layer' => zeros_vector(128),
'hidden_layer_1' => zeros_vector(256),
'hidden_layer_2' => zeros_vector(128),
'output_layer' => zeros_vector(1)
}.
%% Default configuration
default_hyperparameters() ->
#{
learning_rate => 0.001,
batch_size => 32,
epochs => 100,
momentum => 0.9,
dropout_rate => 0.5,
l2_regularization => 0.01,
early_stopping_patience => 5
}.
default_feature_config() ->
#{
card_value_weight => 1.0,
card_type_weight => 0.8,
sequence_weight => 1.2,
combo_weight => 1.5,
position_weight => 0.7,
timing_weight => 0.9
}.
default_layer_configuration() ->
[
{input, 64},
{dense, 128, relu},
{dropout, 0.5},
{dense, 256, relu},
{dropout, 0.5},
{dense, 128, relu},
{dense, 1, sigmoid}
].
default_activation_functions() ->
#{
relu => fun(X) -> max(0, X) end,
sigmoid => fun(X) -> 1 / (1 + math:exp(-X)) end,
tanh => fun(X) -> math:tanh(X) end,
softmax => fun softmax/1
}.
%% Training entry point
do_train(Data, Options, State) ->
try
% Preprocess the data
ProcessedData = preprocess_data(Data, State),
% Split into training and validation sets
{TrainData, ValidData} = split_train_valid(ProcessedData),
% Train the model
{NewModel, TrainMetrics} = train_model(TrainData, ValidData, Options, State),
% Update state with the trained model
NewState = update_state_after_training(State, NewModel, TrainMetrics),
{{ok, TrainMetrics}, NewState}
catch
Error:Reason ->
{{error, {Error, Reason}}, State}
end.
%% Prediction entry point
do_predict(Features, Options, State) ->
try
% Preprocess the features
ProcessedFeatures = preprocess_features(Features, State),
% Forward pass through the model
Prediction = forward_pass(ProcessedFeatures, State#state.model),
% Postprocess the prediction
ProcessedPrediction = postprocess_prediction(Prediction, Options),
{ProcessedPrediction, State}
catch
Error:Reason ->
{{error, {Error, Reason}}, State}
end.
%% Swap in a new model
do_update_model(State, NewModel, Options) ->
ValidatedModel = validate_model(NewModel),
State#state{
model = ValidatedModel,
model_version = State#state.model_version + 1,
last_update = os:timestamp()
}.
%% Append a validated training sample
do_add_training_sample(State, Sample) ->
ValidatedSample = validate_sample(Sample),
NewTrainingData = [ValidatedSample | State#state.training_data],
State#state{training_data = NewTrainingData}.
%% Data preprocessing pipeline
preprocess_data(Data, State) ->
% Extract raw features
Features = extract_features(Data),
% Normalize
NormalizedFeatures = normalize_features(Features, State#state.model.normalization_params),
% Select informative features
SelectedFeatures = select_features(NormalizedFeatures, State#state.feature_config),
% Augment
AugmentedFeatures = augment_features(SelectedFeatures),
AugmentedFeatures.
%% Feature preprocessing for prediction
preprocess_features(Features, State) ->
% Normalize
NormalizedFeatures = normalize_features(Features, State#state.model.normalization_params),
% Transform
TransformedFeatures = transform_features(NormalizedFeatures),
TransformedFeatures.
%% Epoch-based training loop
train_model(TrainData, ValidData, Options, State) ->
InitialModel = State#state.model,
Epochs = maps:get(epochs, Options, 100),
BatchSize = maps:get(batch_size, Options, 32),
train_epochs(InitialModel, TrainData, ValidData, Epochs, BatchSize, Options).
train_epochs(Model, _, _, 0, _, _) ->
{Model, []};
train_epochs(Model, TrainData, ValidData, Epochs, BatchSize, Options) ->
% Build mini-batches
Batches = create_batches(TrainData, BatchSize),
% Train one epoch
{UpdatedModel, EpochMetrics} = train_epoch(Model, Batches, ValidData, Options),
% Early-stopping check
case should_early_stop(EpochMetrics, Options) of
true ->
{UpdatedModel, EpochMetrics};
false ->
train_epochs(UpdatedModel, TrainData, ValidData, Epochs-1, BatchSize, Options)
end.
train_epoch(Model, Batches, ValidData, Options) ->
% Train over all batches
{TrainedModel, BatchMetrics} = train_batches(Model, Batches, Options),
% Evaluate on the validation set
ValidationMetrics = evaluate_model(TrainedModel, ValidData),
% Merge batch and validation metrics
EpochMetrics = merge_metrics(BatchMetrics, ValidationMetrics),
{TrainedModel, EpochMetrics}.
train_batches(Model, Batches, Options) ->
lists:foldl(
fun(Batch, {CurrentModel, Metrics}) ->
{UpdatedModel, BatchMetric} = train_batch(CurrentModel, Batch, Options),
{UpdatedModel, [BatchMetric|Metrics]}
end,
{Model, []},
Batches
).
train_batch(Model, Batch, Options) ->
% Forward pass with cached activations
{Predictions, CacheData} = forward_pass_with_cache(Model, Batch),
% Loss and its gradient
{Loss, LossGrad} = calculate_loss(Predictions, Batch, Options),
% Backward pass
Gradients = backward_pass(LossGrad, CacheData, Model),
% Parameter update
UpdatedModel = update_model_parameters(Model, Gradients, Options),
% Report the batch loss
{UpdatedModel, #{loss => Loss}}.
%% Model evaluation
evaluate_model(Model, Data) ->
% Predict on the evaluation data
Predictions = forward_pass(Model, Data),
% Standard classification metrics
#{
accuracy => calculate_accuracy(Predictions, Data),
precision => calculate_precision(Predictions, Data),
recall => calculate_recall(Predictions, Data),
f1_score => calculate_f1_score(Predictions, Data)
}.
%% Numeric helpers
random_matrix(Rows, Cols) ->
[
[rand:normal() / math:sqrt(Rows) || _ <- lists:seq(1, Cols)]
|| _ <- lists:seq(1, Rows)
].
zeros_vector(Size) ->
[0.0 || _ <- lists:seq(1, Size)].
softmax(X) ->
Exp = [math:exp(Xi) || Xi <- X],
Sum = lists:sum(Exp),
[E / Sum || E <- Exp].
create_batches(Data, BatchSize) ->
create_batches(Data, BatchSize, []).
create_batches([], _, Acc) ->
lists:reverse(Acc);
create_batches(Data, BatchSize, Acc) ->
{Batch, Rest} = case length(Data) of
N when N > BatchSize ->
lists:split(BatchSize, Data);
_ ->
{Data, []}
end,
create_batches(Rest, BatchSize, [Batch|Acc]).
%% Model persistence
do_save_model(State, Filename) ->
ModelData = #{
model => State#state.model,
version => State#state.model_version,
hyperparameters => State#state.hyperparameters,
feature_config => State#state.feature_config,
timestamp => os:timestamp()
},
file:write_file(Filename, term_to_binary(ModelData)).
do_load_model(Filename) ->
case file:read_file(Filename) of
{ok, Binary} ->
try
ModelData = binary_to_term(Binary),
{ok, create_state_from_model_data(ModelData)}
catch
_:_ -> {error, invalid_model_file}
end;
Error ->
Error
end.
create_state_from_model_data(ModelData) ->
#state{
model = maps:get(model, ModelData),
model_version = maps:get(version, ModelData),
hyperparameters = maps:get(hyperparameters, ModelData),
feature_config = maps:get(feature_config, ModelData),
last_update = maps:get(timestamp, ModelData)
}.
%% Metrics bookkeeping
update_performance_metrics(State) ->
NewMetrics = calculate_current_metrics(State),
State#state{
performance_metrics = [NewMetrics | State#state.performance_metrics]
}.
calculate_current_metrics(State) ->
Model = State#state.model,
#{
loss => Model#model.last_train_error,
timestamp => os:timestamp()
}.
%% State update after training
update_state_after_training(State, NewModel, Metrics) ->
State#state{
model = NewModel,
model_version = State#state.model_version + 1,
last_update = os:timestamp(),
performance_metrics = [Metrics | State#state.performance_metrics]
}.
%% Model validation
validate_model(Model) ->
% Validate weights and biases
ValidatedWeights = validate_weights(Model#model.weights),
ValidatedBiases = validate_biases(Model#model.biases),
% Validate the layer configuration
ValidatedLayers = validate_layers(Model#model.layers),
% Keep only well-formed activation functions
ValidatedActivations = validate_activation_functions(Model#model.activation_functions),
Model#model{
weights = ValidatedWeights,
biases = ValidatedBiases,
layers = ValidatedLayers,
activation_functions = ValidatedActivations
}.
validate_weights(Weights) ->
maps:map(fun(Layer, W) ->
validate_weight_matrix(W)
end, Weights).
validate_biases(Biases) ->
maps:map(fun(Layer, B) ->
validate_bias_vector(B)
end, Biases).
validate_layers(Layers) ->
lists:map(fun validate_layer/1, Layers).
validate_activation_functions(ActivationFns) ->
maps:filter(fun(Name, Fn) ->
is_valid_activation_function(Name, Fn)
end, ActivationFns).
%% Forward propagation
forward_pass(Model, Input) ->
{Output, _Cache} = forward_pass_with_cache(Model, Input),
Output.
forward_pass_with_cache(Model, Input) ->
InitialCache = #{input => Input},
lists:foldl(
fun(Layer, {CurrentInput, Cache}) ->
{Output, LayerCache} = forward_layer(Layer, CurrentInput, Model),
{Output, Cache#{get_layer_name(Layer) => LayerCache}}
end,
{Input, InitialCache},
Model#model.layers
).
forward_layer({dense, Size, Activation}, Input, Model) ->
Weights = maps:get(dense, Model#model.weights),
Bias = maps:get(dense, Model#model.biases),
% Linear transform
Z = matrix_multiply(Input, Weights) + Bias,
% Apply the activation
ActivationFn = maps:get(Activation, Model#model.activation_functions),
Output = ActivationFn(Z),
{Output, #{pre_activation => Z, output => Output}};
forward_layer({dropout, Rate}, Input, Model) ->
case get_training_mode(Model) of
true ->
Mask = generate_dropout_mask(Input, Rate),
Output = element_wise_multiply(Input, Mask),
{Output, #{mask => Mask}};
false ->
{Input, #{}}
end.
%% Backward propagation
backward_pass(LossGrad, Cache, Model) ->
{_, Gradients} = lists:foldr(
fun(Layer, {CurrentGrad, LayerGrads}) ->
LayerCache = maps:get(get_layer_name(Layer), Cache),
{NextGrad, LayerGrad} = backward_layer(Layer, CurrentGrad, LayerCache, Model),
{NextGrad, [LayerGrad | LayerGrads]}
end,
{LossGrad, []},
Model#model.layers
),
consolidate_gradients(Gradients).
backward_layer({dense, Size, Activation}, Grad, Cache, Model) ->
% Activation gradient
ActivationGrad = get_activation_gradient(Activation),
% Gradient w.r.t. the pre-activation
PreAct = maps:get(pre_activation, Cache),
DZ = element_wise_multiply(Grad, ActivationGrad(PreAct)),
% Weight and bias gradients
Input = maps:get(input, Cache),
WeightGrad = matrix_multiply(transpose(Input), DZ),
BiasGrad = sum_columns(DZ),
% Gradient passed to the previous layer
Weights = maps:get(dense, Model#model.weights),
InputGrad = matrix_multiply(DZ, transpose(Weights)),
{InputGrad, #{weights => WeightGrad, bias => BiasGrad}};
backward_layer({dropout, Rate}, Grad, Cache, _Model) ->
Mask = maps:get(mask, Cache),
{element_wise_multiply(Grad, Mask), #{}}.
%% Loss functions
calculate_loss(Predictions, Targets, Options) ->
LossType = maps:get(loss_type, Options, cross_entropy),
calculate_loss_by_type(LossType, Predictions, Targets).
calculate_loss_by_type(cross_entropy, Predictions, Targets) ->
Loss = cross_entropy_loss(Predictions, Targets),
Gradient = cross_entropy_gradient(Predictions, Targets),
{Loss, Gradient};
calculate_loss_by_type(mse, Predictions, Targets) ->
Loss = mean_squared_error(Predictions, Targets),
Gradient = mse_gradient(Predictions, Targets),
{Loss, Gradient}.
%% Parameter updates
update_model_parameters(Model, Gradients, Options) ->
Optimizer = maps:get(optimizer, Options, adam),
LearningRate = maps:get(learning_rate, Options, 0.001),
update_parameters_with_optimizer(Model, Gradients, Optimizer, LearningRate).
update_parameters_with_optimizer(Model, Gradients, adam, LearningRate) ->
% Adam optimizer
Beta1 = 0.9,
Beta2 = 0.999,
Epsilon = 1.0e-8,
% Update moment estimates and weights
{NewWeights, NewMomentum} = update_adam_parameters(
Model#model.weights,
maps:get(weights, Gradients),
maps:get(momentum, Model, #{}),
LearningRate,
Beta1,
Beta2,
Epsilon
),
Model#model{
weights = NewWeights,
momentum = NewMomentum
};
update_parameters_with_optimizer(Model, Gradients, sgd, LearningRate) ->
% Plain SGD update
NewWeights = update_sgd_parameters(
Model#model.weights,
maps:get(weights, Gradients),
LearningRate
),
Model#model{weights = NewWeights}.
%% Feature extraction
extract_features(Data) ->
lists:map(fun extract_sample_features/1, Data).
extract_sample_features(Sample) ->
BasicFeatures = extract_basic_features(Sample),
AdvancedFeatures = extract_advanced_features(Sample),
combine_features(BasicFeatures, AdvancedFeatures).
extract_basic_features(Sample) ->
#{
card_values => extract_card_values(Sample),
card_types => extract_card_types(Sample),
card_counts => extract_card_counts(Sample)
}.
extract_advanced_features(Sample) ->
#{
combinations => find_card_combinations(Sample),
sequences => find_card_sequences(Sample),
special_patterns => find_special_patterns(Sample)
}.
%% Matrix helpers
matrix_multiply(A, B) ->
% Check dimensions before multiplying
case {matrix_dimensions(A), matrix_dimensions(B)} of
{{RowsA, ColsA}, {RowsB, ColsB}} when ColsA =:= RowsB ->
do_matrix_multiply(A, B, RowsA, ColsB);
_ ->
error(matrix_dimension_mismatch)
end.
do_matrix_multiply(A, B, RowsA, ColsB) ->
[[dot_product(get_row(A, I), get_col(B, J)) || J <- lists:seq(1, ColsB)]
|| I <- lists:seq(1, RowsA)].
dot_product(Vec1, Vec2) ->
lists:sum([X * Y || {X, Y} <- lists:zip(Vec1, Vec2)]).
transpose(Matrix) ->
case Matrix of
[] -> [];
[[]|_] -> [];
_ ->
[get_col(Matrix, I) || I <- lists:seq(1, length(hd(Matrix)))]
end.
%% Layer utilities
get_layer_name({Type, Size, _}) ->
atom_to_list(Type) ++ "_" ++ integer_to_list(Size);
get_layer_name({Type, Rate}) ->
atom_to_list(Type) ++ "_" ++ float_to_list(Rate).
generate_dropout_mask(Input, Rate) ->
Size = matrix_dimensions(Input),
[[case rand:uniform() < Rate of true -> 0.0; false -> 1.0 end
|| _ <- lists:seq(1, Size)]
|| _ <- lists:seq(1, Size)].
element_wise_multiply(A, B) ->
[[X * Y || {X, Y} <- lists:zip(RowA, RowB)]
|| {RowA, RowB} <- lists:zip(A, B)].
sum_columns(Matrix) ->
lists:foldl(
fun(Row, Acc) ->
[X + Y || {X, Y} <- lists:zip(Row, Acc)]
end,
lists:duplicate(length(hd(Matrix)), 0.0),
Matrix
).
matrix_dimensions([]) -> {0, 0};
matrix_dimensions([[]|_]) -> {0, 0};
matrix_dimensions(Matrix) ->
{length(Matrix), length(hd(Matrix))}.
get_row(Matrix, I) ->
lists:nth(I, Matrix).
get_col(Matrix, J) ->
[lists:nth(J, Row) || Row <- Matrix].
%% Persist the model on shutdown
save_final_state(State) ->
Filename = "ml_model_" ++ format_timestamp() ++ ".state",
do_save_model(State, Filename).
format_timestamp() ->
{{Year, Month, Day}, {Hour, Minute, Second}} = calendar:universal_time(),
lists:flatten(io_lib:format("~4..0w~2..0w~2..0w_~2..0w~2..0w~2..0w",
[Year, Month, Day, Hour, Minute, Second])).
% Simplified training used by the new API
train_model(ModelData, TrainingData) ->
% Placeholder: a real implementation would fit parameters;
% for now the new data is merged into the model map
maps:merge(ModelData, TrainingData).
% Simplified prediction used by the new API
predict_with_model(_ModelData, _InputData) ->
% Placeholder: returns a fixed confidence until a real model
% is wired in
{prediction, 0.75}.
% Placeholder metrics for the training reply
compute_metrics(_State) ->
#{accuracy => 0.8, loss => 0.2}.
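A hedged usage sketch of the slimmed-down API above; the shapes of the training and input data are assumptions, since train_model/2 currently just merges maps:

{ok, Pid} = ml_engine:start_link(),
%% The training reply carries the placeholder metrics map.
{ok, #{accuracy := _, loss := _}} = ml_engine:train(Pid, #{samples => 128}),
%% Prediction currently returns the fixed placeholder.
{ok, {prediction, 0.75}} = ml_engine:predict(Pid, [0.1, 0.9]),
%% Replace the model data wholesale.
ok = ml_engine:update_model(Pid, #{weights => #{}}).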

+ 19  - 1   src/performance_optimization.erl   View File

@@ -1,7 +1,7 @@
-module(performance_optimization).
-behaviour(gen_server).
-export([start_link/0, init/1, handle_call/3, handle_cast/2]).
-export([start_link/0, init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]).
-export([optimize_resources/0, get_performance_stats/0]).
-record(state, {
@@ -51,6 +51,24 @@ handle_cast(optimize, State) ->
handle_cast(_Msg, State) ->
{noreply, State}.
%% Periodic optimization tick
handle_info(run_optimization, State) ->
ResourceUsage = analyze_resource_usage(),
OptimizationActions = calculate_optimization_actions(ResourceUsage, State#state.optimization_rules),
NewState = apply_optimization_actions(OptimizationActions, State),
schedule_optimization(),
{noreply, NewState#state{resource_usage = ResourceUsage}};
handle_info(_Info, State) ->
{noreply, State}.
%%
terminate(_Reason, _State) ->
ok.
%%
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
%%
calculate_optimization_actions(ResourceUsage, Rules) ->
% Derive actions from resource usage and the configured rules
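schedule_optimization/0 is referenced by the handle_info clause above but is not part of this hunk; a minimal sketch of the timer helper it implies (the 60-second interval is an assumption):

%% Re-arm the periodic run_optimization message.
schedule_optimization() ->
    erlang:send_after(60000, self(), run_optimization).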

+ 17  - 12   src/score_system.erl   View File

@@ -44,17 +44,20 @@ handle_call({update_score, PlayerName, GameResult, Points}, _From, State = #stat
{NewScore, NewWins, NewLosses} = case GameResult of
win -> {Score + Points, Wins + 1, Losses};
loss -> {Score - Points, Wins, Losses + 1}
loss -> {Score - Points, Wins, Losses + 1};
draw -> {Score, Wins, Losses}
end,
NewScores = maps:put(PlayerName, {NewScore, NewWins, NewLosses}, Scores),
NewLeaderboard = update_leaderboard(NewScores),
{reply, {ok, {NewScore, NewWins, NewLosses}},
State#state{scores = NewScores, leaderboard = NewLeaderboard}};
{reply, {ok, {NewScore, NewWins, NewLosses}}, State#state{scores = NewScores, leaderboard = NewLeaderboard}};
handle_call(get_leaderboard, _From, State = #state{leaderboard = Leaderboard}) ->
{reply, {ok, Leaderboard}, State}.
{reply, {ok, Leaderboard}, State};
handle_call(_Request, _From, State) ->
{reply, {error, unknown_call}, State}.
handle_cast(_Msg, State) ->
{noreply, State}.
@@ -68,13 +71,15 @@ terminate(_Reason, _State) ->
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
%% Rebuild the leaderboard from the score map
update_leaderboard(Scores) ->
List = maps:to_list(Scores),
SortedList = lists:sort(
fun({_, {Score1, _, _}}, {_, {Score2, _, _}}) ->
Score1 >= Score2
end,
List
% Sort players by score, descending
ScoresList = maps:to_list(Scores),
SortedScores = lists:sort(
fun({_, {Score1, _, _}}, {_, {Score2, _, _}}) -> Score1 > Score2 end,
ScoresList
),
lists:sublist(SortedList, 10).
% Keep only the top 10
lists:sublist(SortedScores, 10).
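A hedged usage sketch of the calls handled above, assuming the server is registered locally as score_system (the exported API wrappers are not shown in this hunk):

{ok, {Score, Wins, Losses}} =
    gen_server:call(score_system, {update_score, "alice", win, 30}),
%% draw now leaves the score, wins and losses unchanged.
{ok, _} = gen_server:call(score_system, {update_score, "alice", draw, 0}),
{ok, Top10} = gen_server:call(score_system, get_leaderboard).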

+ 0  - 53   src/strategy_optimizer.erl   View File

@@ -1,53 +0,0 @@
-module(strategy_optimizer).
-export([optimize_strategy/2, evaluate_strategy/2, adapt_strategy/3]).
-record(strategy_state, {
current_strategy,
performance_metrics,
adaptation_rate,
optimization_history
}).
optimize_strategy(Strategy, GameState) ->
% Analyze the current situation
SituationAnalysis = analyze_current_situation(GameState),
% Generate strategy variants
StrategyVariants = generate_strategy_variants(Strategy, SituationAnalysis),
% Evaluate each variant
EvaluatedVariants = evaluate_strategy_variants(StrategyVariants, GameState),
% Pick the best one
select_best_strategy(EvaluatedVariants).
evaluate_strategy(Strategy, GameState) ->
% Control ability
ControlScore = evaluate_control_ability(Strategy, GameState),
% Tempo management
TempoScore = evaluate_tempo_management(Strategy, GameState),
% Risk management
RiskScore = evaluate_risk_management(Strategy, GameState),
% Resource utilization
ResourceScore = evaluate_resource_utilization(Strategy, GameState),
% Weighted overall score
calculate_overall_score([
{ControlScore, 0.3},
{TempoScore, 0.25},
{RiskScore, 0.25},
{ResourceScore, 0.2}
]).
adapt_strategy(Strategy, GameState, Performance) ->
% Analyze recent performance
PerformanceAnalysis = analyze_performance(Performance),
% Decide the adjustment direction
AdjustmentDirection = determine_adjustment(PerformanceAnalysis),
% Produce the adapted strategy
generate_adapted_strategy(Strategy, AdjustmentDirection, GameState).
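calculate_overall_score/1 is not included in the removed hunk; the {Score, Weight} pairs passed to it suggest a plain weighted sum. A sketch under that assumption:

%% Assumed aggregate: the weights above sum to 1.0.
calculate_overall_score(WeightedScores) ->
    lists:sum([Score * Weight || {Score, Weight} <- WeightedScores]).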

+ 0  - 58   src/visualization.erl   View File

@@ -1,58 +0,0 @@
-module(visualization).
-behaviour(gen_server).
-export([start_link/0, init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]).
-export([create_chart/2, update_chart/2, export_chart/2]).
-record(state, {
charts = #{}, % chart id => chart data
renderers = #{}, % chart type => renderer fun
export_formats = [png, svg, pdf]
}).
%% API
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
create_chart(ChartType, Data) ->
gen_server:call(?MODULE, {create_chart, ChartType, Data}).
update_chart(ChartId, NewData) ->
gen_server:call(?MODULE, {update_chart, ChartId, NewData}).
export_chart(ChartId, Format) ->
gen_server:call(?MODULE, {export_chart, ChartId, Format}).
%% Renderer table
initialize_renderers() ->
#{
line_chart => fun draw_line_chart/2,
bar_chart => fun draw_bar_chart/2,
pie_chart => fun draw_pie_chart/2,
scatter_plot => fun draw_scatter_plot/2
}.
draw_line_chart(Data, Options) ->
% Line chart
{ok, generate_line_chart(Data, Options)}.
draw_bar_chart(Data, Options) ->
% Bar chart
{ok, generate_bar_chart(Data, Options)}.
draw_pie_chart(Data, Options) ->
% Pie chart
{ok, generate_pie_chart(Data, Options)}.
draw_scatter_plot(Data, Options) ->
% Scatter plot
{ok, generate_scatter_plot(Data, Options)}.
export_to_format(Chart, Format) ->
% Dispatch on the export format
case Format of
png -> export_to_png(Chart);
svg -> export_to_svg(Chart);
pdf -> export_to_pdf(Chart)
end.
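The renderers map pairs each chart type with a fun of arity 2; a hedged sketch of the lookup the removed handle_call presumably performed (render_chart/4 is a hypothetical helper):

%% Look up and apply the renderer for a chart type.
render_chart(ChartType, Data, Options, Renderers) ->
    case maps:find(ChartType, Renderers) of
        {ok, Renderer} -> Renderer(Data, Options);
        error -> {error, {unknown_chart_type, ChartType}}
    end.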

+ 0  - 2   斗地主.md   View File

@@ -27,7 +27,6 @@
3. **System support modules**
- parallel_compute.erl: parallel computation
- performance_monitor.erl: performance monitoring
- visualization.erl: visualization and analysis
## Features
@@ -146,7 +145,6 @@ c(optimizer).
c(deep_learning).
c(parallel_compute).
c(performance_monitor).
c(visualization).
c(ai_test).
% Run the tests
