A chess library for Gleam
2
fork

Configure Feed

Select the types of activity you want to include in your feed.

Order moves before searching to improve alpha-beta pruning

+116 -21
+11
src/starfish/internal/board.gleam
··· 33 33 King 34 34 } 35 35 36 + pub fn piece_value(piece: Piece) -> Int { 37 + case piece { 38 + Pawn -> 100 39 + Knight -> 300 40 + Bishop -> 300 41 + Rook -> 500 42 + Queen -> 900 43 + King -> 1000 44 + } 45 + } 46 + 36 47 pub const pawn_promotions = [Bishop, Knight, Rook, Queen] 37 48 38 49 pub type Colour {
+2 -13
src/starfish/internal/evaluate.gleam
··· 7 7 8 8 /// Statically evaluates a position. Does not take into account checkmate or 9 9 /// stalemate, those must be accounted for beforehand. 10 - pub fn evaluate(game: game.Game, legal_moves: List(move.Move)) -> Int { 10 + pub fn evaluate(game: game.Game, legal_moves: List(#(move.Move, Int))) -> Int { 11 11 evaluate_position(game) + list.length(legal_moves) 12 12 } 13 13 14 14 fn evaluate_position(game: game.Game) -> Int { 15 15 use eval, position, #(piece, colour) <- dict.fold(game.board, 0) 16 16 let score = 17 - piece_score(piece) + piece_table.piece_score(piece, colour, position) 17 + board.piece_value(piece) + piece_table.piece_score(piece, colour, position) 18 18 case colour == game.to_move { 19 19 True -> eval + score 20 20 False -> eval - score 21 21 } 22 22 } 23 - 24 - fn piece_score(piece: board.Piece) -> Int { 25 - case piece { 26 - board.King -> 0 27 - board.Pawn -> 100 28 - board.Knight -> 300 29 - board.Bishop -> 300 30 - board.Rook -> 500 31 - board.Queen -> 900 32 - } 33 - }
+103 -8
src/starfish/internal/search.gleam
··· 1 1 import gleam/bool 2 + import gleam/int 3 + import gleam/list 2 4 import gleam/option.{type Option, None, Some} 5 + import starfish/internal/board 3 6 import starfish/internal/evaluate 4 7 import starfish/internal/game.{type Game} 5 8 import starfish/internal/hash 6 9 import starfish/internal/move.{type Move} 10 + import starfish/internal/piece_table 7 11 8 12 /// Not really infinity, but a high enough number that nothing but explicit 9 13 /// references to it will reach it. ··· 16 20 17 21 pub fn best_move(game: Game, until: Until) -> Result(Move, Nil) { 18 22 use <- bool.guard(until(0), Error(Nil)) 19 - let legal_moves = move.legal(game) 23 + let legal_moves = order_moves(game) 20 24 use <- bool.guard(legal_moves == [], Error(Nil)) 21 25 iterative_deepening(game, 1, None, legal_moves, hash.new_table(), until) 22 26 } ··· 25 29 game: Game, 26 30 depth: Int, 27 31 best_move: Option(Move), 28 - legal_moves: List(Move), 32 + legal_moves: List(#(Move, Int)), 29 33 cached_positions: hash.Table, 30 34 until: Until, 31 35 ) -> Result(Move, Nil) { ··· 49 53 game, 50 54 depth + 1, 51 55 Some(best_move), 52 - legal_moves, 56 + // TODO: Instead of just sorting the best move to the front, maybe we 57 + // can sort all the moves by evaluation? 
58 + reorder_moves(legal_moves, best_move), 53 59 cached_positions, 54 60 until, 55 61 ) ··· 79 85 game: Game, 80 86 cached_positions: hash.Table, 81 87 depth: Int, 82 - legal_moves: List(Move), 88 + legal_moves: List(#(Move, Int)), 83 89 best_move: Option(Move), 84 90 best_eval: Int, 85 91 until: Until, ··· 91 97 Some(best_move) -> 92 98 Ok(TopLevelSearchResult(best_move:, cached_positions:)) 93 99 } 94 - [move, ..moves] -> { 100 + [#(move, _), ..moves] -> { 95 101 let SearchResult(eval:, cached_positions:, eval_kind: _, finished:) = 96 102 search( 97 103 move.apply(game, move), ··· 162 168 Ok(#(eval, eval_kind)) -> 163 169 SearchResult(eval:, cached_positions:, eval_kind:, finished: True) 164 170 Error(_) -> 165 - case move.legal(game) { 171 + case order_moves(game) { 166 172 // If the game is in a checkmate or stalemate position, the game is over, so 167 173 // we stop searching. 168 174 [] -> { ··· 245 251 fn search_loop( 246 252 game: Game, 247 253 cached_positions: hash.Table, 248 - moves: List(Move), 254 + moves: List(#(Move, Int)), 249 255 depth: Int, 250 256 // The best evaluation we've encountered so far. 251 257 best_eval: Int, ··· 265 271 eval_kind:, 266 272 finished: True, 267 273 ) 268 - [move, ..moves] -> { 274 + [#(move, _), ..moves] -> { 269 275 // Evaluate the position for the opponent. The negative of the opponent's 270 276 // eval is our eval. 271 277 let SearchResult( ··· 317 323 } 318 324 } 319 325 } 326 + 327 + /// Sort moves by their guessed evaluation. We return the guesses with the moves 328 + /// in order to save iterating the list a second time. The guesses are discarded 329 + /// after this point. 
330 + fn order_moves(game: Game) -> List(#(Move, Int)) { 331 + game 332 + |> move.legal 333 + |> collect_guessed_eval(game, []) 334 + |> list.sort(fn(a, b) { int.compare(a.1, b.1) }) 335 + } 336 + 337 + /// Reorder already ordered moves to move the best move to the front of the list, 338 + /// so that it will be searched first on the next iteration. 339 + fn reorder_moves( 340 + moves: List(#(Move, Int)), 341 + best_move: Move, 342 + ) -> List(#(Move, Int)) { 343 + let moves_without_best = list.filter(moves, fn(pair) { pair.0 != best_move }) 344 + [#(best_move, 0), ..moves_without_best] 345 + } 346 + 347 + fn collect_guessed_eval( 348 + moves: List(Move), 349 + game: Game, 350 + acc: List(#(Move, Int)), 351 + ) -> List(#(Move, Int)) { 352 + case moves { 353 + [] -> acc 354 + [move, ..moves] -> 355 + collect_guessed_eval(moves, game, [#(move, guess_eval(game, move)), ..acc]) 356 + } 357 + } 358 + 359 + /// Rate captures and promotions higher than quiet moves 360 + const capture_promotion_bonus = 10_000 361 + 362 + /// Guess the evaluation of a move so we can hopefully search moves in a better 363 + /// order than random. Searching better moves first improves alpha-beta pruning, 364 + /// allowing us to search more positions. 365 + fn guess_eval(game: Game, move: Move) -> Int { 366 + let assert board.Occupied(piece:, colour:) = board.get(game.board, move.from) 367 + as "Invalid move trying to move empty piece" 368 + 369 + let moving_piece = case move { 370 + move.Promotion(piece:, ..) -> piece 371 + move.Capture(..) | move.Castle(..) | move.EnPassant(..) | move.Move(..) -> 372 + piece 373 + } 374 + 375 + let from_score = piece_table.piece_score(moving_piece, colour, move.from) 376 + let to_score = piece_table.piece_score(moving_piece, colour, move.to) 377 + 378 + let position_improvement = to_score - from_score 379 + let move_specific_score = case move { 380 + // TODO store information in moves so we don't have to retrieve it from the 381 + // board every time. 
382 + move.Capture(..) -> { 383 + let assert board.Occupied(piece: captured_piece, colour: _) = 384 + board.get(game.board, move.to) 385 + as "Invalid capture moving to empty square" 386 + 387 + capture_promotion_bonus 388 + // Capturing a more valuable piece is better, and using a less valuable 389 + // piece to capture is usually better. However, we prioritise the value of 390 + // the captured piece. 391 + + board.piece_value(captured_piece) 392 + * 2 393 + - board.piece_value(moving_piece) 394 + } 395 + move.EnPassant(..) -> capture_promotion_bonus 396 + move.Promotion(..) -> { 397 + // Promotions can also be captures 398 + let capture_value = case board.get(game.board, move.to) { 399 + board.Empty | board.OffBoard -> 0 400 + board.Occupied(piece: captured_piece, colour: _) -> 401 + board.piece_value(captured_piece) 402 + * 2 403 + - board.piece_value(moving_piece) 404 + } 405 + 406 + // Promoting to a more valuable piece is usually better 407 + capture_promotion_bonus + capture_value + board.piece_value(move.piece) 408 + } 409 + // For castling and quiet moves, we can't easily predict the score 410 + move.Castle(..) | move.Move(..) -> 0 411 + } 412 + 413 + position_improvement + move_specific_score 414 + }