From 1e0559f3863f4a82b83766872c05ddf3306e8af8 Mon Sep 17 00:00:00 2001
From: MrGeorgen
Date: Wed, 6 Aug 2025 12:36:47 +0200
Subject: [PATCH] Test all game versions

---
 nimm.py | 36 +++++++++++++++++++++++-------------
 1 file changed, 23 insertions(+), 13 deletions(-)

diff --git a/nimm.py b/nimm.py
index 3715785..2ad8ecc 100644
--- a/nimm.py
+++ b/nimm.py
@@ -13,7 +13,10 @@ def optimal_move():
             return n
     return random.randint(1, 3) # if there is no perfect choice, make any move
 
-def ki_move(lastMove):
+def random_move():
+    return random.randint(1, 3)
+
+def ki_move(lastMove, better_version):
     global state
     antiMoves = lostMoves.get(state)
     if antiMoves == None:
@@ -26,7 +29,8 @@ def ki_move(lastMove):
             return i + 1
 
     # There are no good moves left, so the last move must have been a bad one.
-    addLostMove(lastMove.state, lastMove.move)
+    if better_version:
+        addLostMove(lastMove.state, lastMove.move)
     return random.randint(1, 3)
 
 def makeMove(move): # returns True if the player has lost
@@ -44,13 +48,13 @@ def addLostMove(state, move):
         moves.append(move)
     lostMoves[state] = moves
 
-def game(train):
+def game(train, opponent, better_version):
     global state
     state = 12
     lastMove = None
     while True:
         # the AI moves first, otherwise it cannot win
-        move = ki_move(lastMove)
+        move = ki_move(lastMove, better_version)
         lost = makeMove(move)
         if lost:
             if train:
@@ -58,7 +62,7 @@ def game(train):
             return 0 # the optimal strategy won
 
         lastMove = Move(state + move, move)
-        move = optimal_move()
+        move = opponent()
         lost = makeMove(move)
         if lost:
             return 1 # the AI won
@@ -67,13 +71,19 @@ state = 12
 lostMoves = {}
 
 # train
-for _ in range(1000):
-    game(True)
+opponents = [random_move, optimal_move]
+for ki_version in [False, True]:
+    for train_opponent in opponents:
+        for eval_opponent in opponents:
+            ki_text = "optimal version" if ki_version else "first version"
+            print(f"AI: {ki_text}, trained against {train_opponent.__name__}, evaluated against {eval_opponent.__name__}")
+            for _ in range(1000):
+                game(train=True, opponent=train_opponent, better_version=ki_version)
 
-# eval
-numberEvalGames = 100000
-wonGames = 0
-for _ in range(numberEvalGames):
-    wonGames += game(False)
+            # eval
+            numberEvalGames = 100000
+            wonGames = 0
+            for _ in range(numberEvalGames):
+                wonGames += game(False, opponent=eval_opponent, better_version=ki_version)
 
-print(f"The AI won {wonGames / numberEvalGames * 100}% of the games.")
+            print(f"The AI won {wonGames / numberEvalGames * 100}% of the games.\n")
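
For readers who do not have the rest of nimm.py at hand, below is a minimal runnable sketch of what the file could look like after this patch. Only the lines visible in the diff are taken from the patch; the Move tuple, the bodies of makeMove, addLostMove and optimal_move, and the hidden lines of ki_move (and the addLostMove call inside "if train:") are assumptions reconstructed from the surrounding context and comments, not the author's exact code.

    # Hypothetical reconstruction of nimm.py after this patch (assumptions marked).
    import random
    from collections import namedtuple

    # assumed: records the state before the AI's move and the move itself
    Move = namedtuple("Move", ["state", "move"])

    state = 12
    lostMoves = {}  # maps a state to the moves known to lose from it

    def optimal_move():
        # assumed body: leave the opponent a state of the form 4k + 1 (misere Nim, take 1-3)
        for n in range(1, 4):
            if (state - n) % 4 == 1:
                return n
        return random.randint(1, 3)  # if there is no perfect choice, make any move

    def random_move():
        return random.randint(1, 3)

    def ki_move(lastMove, better_version):
        global state
        antiMoves = lostMoves.get(state)
        if antiMoves == None:
            antiMoves = []
        # assumed: play the first move not yet known to lose from this state
        for i in range(3):
            if (i + 1) not in antiMoves:
                return i + 1
        # There are no good moves left, so the last move must have been a bad one.
        if better_version:
            addLostMove(lastMove.state, lastMove.move)
        return random.randint(1, 3)

    def makeMove(move):  # returns True if the player has lost (assumed: taking the last stick loses)
        global state
        state -= move
        return state <= 0

    def addLostMove(state, move):
        moves = lostMoves.get(state, [])
        if move not in moves:
            moves.append(move)
        lostMoves[state] = moves

    def game(train, opponent, better_version):
        global state
        state = 12
        lastMove = None
        while True:
            # the AI moves first, otherwise it cannot win
            move = ki_move(lastMove, better_version)
            lost = makeMove(move)
            if lost:
                if train:
                    addLostMove(state + move, move)  # assumed: record the AI's directly losing move
                return 0  # the optimal strategy won
            lastMove = Move(state + move, move)
            move = opponent()
            lost = makeMove(move)
            if lost:
                return 1  # the AI won

    if __name__ == "__main__":
        # quick smoke test of the reconstruction (not the patch's own driver loop)
        for _ in range(1000):
            game(train=True, opponent=optimal_move, better_version=True)
        wins = sum(game(False, opponent=optimal_move, better_version=True) for _ in range(1000))
        print(f"reconstructed AI won {wins / 10}% of 1000 evaluation games")

The design change in the patch itself is that the hard-coded optimal_move opponent and the unconditional lost-move propagation become parameters (opponent, better_version), so the driver loop can train and evaluate every combination of AI version and opponent instead of a single fixed matchup.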