// This file is part of Freecell Solver. It is subject to the license terms in
// the COPYING.txt file found in the top-level directory of this distribution
// and at http://fc-solve.shlomifish.org/docs/distro/COPYING.html . No part of
// Freecell Solver, including this file, may be copied, modified, propagated,
// or distributed except according to the terms contained in the COPYING file.
//
// Copyright (c) 2000 Shlomi Fish
// scans.c - The code that relates to the various scans.
// Currently Soft-DFS / Random-DFS, BeFS (best-first search) and BFS
// (breadth-first search) are implemented.

#include "move_stack_compact_alloc.h"

#undef DEBUG

#include "scans_impl.h"

// GCC does not handle inline functions as well as macros.
#define kv_calc_depth(ptr_state)                                               \
    calc_depth(FCS_STATE_kv_to_collectible(ptr_state))

#ifdef FCS_RCS_STATES
// TODO : Unit-test this function, as it previously had a bug:
// lru_side used to be an unsigned long.
typedef const char *lru_side;

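// Compare two cache entries by the addresses of their val_ptr states; this
// is how the states-values-to-keys mapping (e.g. the kaz_tree below) orders
// its entries. The pointers are compared as const char * (= lru_side) - see
// the TODO above regarding the old unsigned long bug.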
extern int __attribute__((pure))
fc_solve_compare_lru_cache_keys(const void *const void_a,
    const void *const void_b, void *const context GCC_UNUSED)
{
#define GET_PARAM(p) ((lru_side)(((const fcs_cache_key_info *)(p))->val_ptr))
    const lru_side a = GET_PARAM(void_a), b = GET_PARAM(void_b);

    return ((a > b) ? 1 : (a < b) ? (-1) : 0);
#undef GET_PARAM
}

#define NEXT_CACHE_STATE(s) ((s)->lower_pri)
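// Look up - or lazily reconstruct - the full state key of a collectible
// state value when FCS_RCS_STATES is enabled. In outline:
//
// 1. Ascend the parents chain, pushing every state whose key is not yet
//    cached onto parents_stack, until reaching either a cached entry or
//    the initial state (whose key is instance->state_copy.s).
// 2. Descend back down the stack, deriving each state's key from its
//    parent's key by replaying its moves_to_parent stack and canonizing
//    the result.
// 3. Promote each touched entry to the head of the cache's priority list,
//    then evict the lowest-priority entries until the cache is back under
//    max_num_elements_in_cache.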
fcs_state *fc_solve_lookup_state_key_from_val(fcs_instance *const instance,
    const fcs_collectible_state *const orig_ptr_state_val)
{
#if (FCS_RCS_CACHE_STORAGE == FCS_RCS_CACHE_STORAGE_JUDY)
    PWord_t PValue;
#endif
    FC__STACKS__SET_PARAMS();
    fcs_lru_cache *cache = &(instance->rcs_states_cache);

    ssize_t parents_stack_len = 1;
    ssize_t parents_stack_max_len = 16;

    struct
    {
        fcs_cache_key_info *new_cache_state;
        const fcs_collectible_state *state_val;
    } *parents_stack = SMALLOC(parents_stack, (size_t)parents_stack_max_len);

    parents_stack[0].state_val = orig_ptr_state_val;

    fcs_cache_key_info *new_cache_state;
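    // Phase 1: ascend the parents chain until a state with a cached key,
    // or the initial state, is found.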
    while (1)
    {
#if (FCS_RCS_CACHE_STORAGE == FCS_RCS_CACHE_STORAGE_JUDY)
        JLI(PValue, cache->states_values_to_keys_map,
            ((Word_t)parents_stack[parents_stack_len - 1].state_val));
        if (*PValue)
        {
            parents_stack[parents_stack_len - 1].new_cache_state =
                new_cache_state = (fcs_cache_key_info *)(*PValue);
            break;
        }
        else
        {
            // A new state.
            if (cache->recycle_bin)
            {
                new_cache_state = cache->recycle_bin;
                cache->recycle_bin = NEXT_CACHE_STATE(new_cache_state);
            }
            else
            {
                new_cache_state = fcs_compact_alloc_ptr(
                    &(cache->states_values_to_keys_allocator),
                    sizeof(*new_cache_state));
            }
        }
#else
        if (cache->recycle_bin)
        {
            new_cache_state = cache->recycle_bin;
            cache->recycle_bin = NEXT_CACHE_STATE(new_cache_state);
        }
        else
        {
            new_cache_state =
                fcs_compact_alloc_ptr(&(cache->states_values_to_keys_allocator),
                    sizeof(*new_cache_state));
        }

        new_cache_state->val_ptr =
            parents_stack[parents_stack_len - 1].state_val;
        fcs_cache_key_info *const existing_cache_state =
            (fcs_cache_key_info *)fc_solve_kaz_tree_alloc_insert(
                cache->kaz_tree, new_cache_state);

        if (existing_cache_state)
        {
            NEXT_CACHE_STATE(new_cache_state) = cache->recycle_bin;
            cache->recycle_bin = new_cache_state;

            parents_stack[parents_stack_len - 1].new_cache_state =
                new_cache_state = existing_cache_state;
            break;
        }
#endif

        parents_stack[parents_stack_len - 1].new_cache_state = new_cache_state;

#if (FCS_RCS_CACHE_STORAGE == FCS_RCS_CACHE_STORAGE_JUDY)
        *PValue = ((Word_t)new_cache_state);

        new_cache_state->val_ptr =
            parents_stack[parents_stack_len - 1].state_val;
#endif
        new_cache_state->lower_pri = new_cache_state->higher_pri = NULL;
        ++cache->count_elements_in_cache;

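        // A state with no parent is the initial state; its key is stored
        // in the instance itself, so the ascent stops here.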
        if (!FCS_S_PARENT(parents_stack[parents_stack_len - 1].state_val))
        {
            new_cache_state->key = instance->state_copy.s;
            break;
        }
        else
        {
            parents_stack[parents_stack_len].state_val =
                FCS_S_PARENT(parents_stack[parents_stack_len - 1].state_val);
            if (++parents_stack_len == parents_stack_max_len)
            {
                parents_stack_max_len += 16;
                const_AUTO(old_parents_stack, parents_stack);
                parents_stack =
                    SREALLOC(parents_stack, (size_t)parents_stack_max_len);
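                // NOTE: if the reallocation fails, the old stack is freed
                // but parents_stack is left NULL, so the scan cannot
                // continue meaningfully past this point; allocation failure
                // is presumably treated as fatal.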
                if (unlikely(!parents_stack))
                {
                    free(old_parents_stack);
                }
            }
        }
    }

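    // Phase 2: descend back down the stack, deriving each state's key from
    // its parent's (already-computed) key by replaying the recorded moves.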
    for (--parents_stack_len; parents_stack_len > 0; --parents_stack_len)
    {
        new_cache_state = parents_stack[parents_stack_len - 1].new_cache_state;

        fcs_state *const pass_key = &(new_cache_state->key);
        *pass_key = parents_stack[parents_stack_len].new_cache_state->key;

        const fcs_move_stack *const stack_ptr__moves_to_parent =
            parents_stack[parents_stack_len - 1].state_val->moves_to_parent;
        const fcs_internal_move *next_move = stack_ptr__moves_to_parent->moves;
        const fcs_internal_move *const moves_end =
            (next_move + stack_ptr__moves_to_parent->num_moves);

        for (; next_move < moves_end; next_move++)
        {
            fc_solve_apply_move(pass_key, NULL,
                (*next_move)PASS_FREECELLS(LOCAL_FREECELLS_NUM)
                    PASS_STACKS(LOCAL_STACKS_NUM));
        }
        // The state->parent_state moves stack has an implicit canonize
        // suffix move.
        fc_solve_canonize_state(pass_key PASS_FREECELLS(LOCAL_FREECELLS_NUM)
                PASS_STACKS(LOCAL_STACKS_NUM));

        // Promote new_cache_state to the head of the priority list.
        if (!cache->lowest_pri)
        {
            // It's the only state.
            cache->lowest_pri = new_cache_state;
            cache->highest_pri = new_cache_state;
        }
        else
        {
            // First remove the state from its place in the doubly-linked
            // list by linking its neighbours together.
            if (new_cache_state->higher_pri)
            {
                new_cache_state->higher_pri->lower_pri =
                    new_cache_state->lower_pri;
            }

            if (new_cache_state->lower_pri)
            {
                new_cache_state->lower_pri->higher_pri =
                    new_cache_state->higher_pri;
            }
            // Bug fix: make sure that ->lowest_pri is always valid.
            else if (new_cache_state->higher_pri)
            {
                cache->lowest_pri = new_cache_state->higher_pri;
            }

            // Now promote it to be the highest.
            cache->highest_pri->higher_pri = new_cache_state;
            new_cache_state->lower_pri = cache->highest_pri;
            new_cache_state->higher_pri = NULL;
            cache->highest_pri = new_cache_state;
        }
    }

    free(parents_stack);

    var_AUTO(count, cache->count_elements_in_cache);
    const_AUTO(limit, cache->max_num_elements_in_cache);

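    // Phase 3: evict the least-recently-used entries until the cache is
    // back under its configured limit, putting them in the recycle bin.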
    while (count > limit)
    {
        fcs_cache_key_info *lowest_pri = cache->lowest_pri;
#if (FCS_RCS_CACHE_STORAGE == FCS_RCS_CACHE_STORAGE_JUDY)
        int rc_int;
        JLD(rc_int, cache->states_values_to_keys_map,
            (Word_t)(lowest_pri->val_ptr));
#else
        fc_solve_kaz_tree_delete_free(cache->kaz_tree,
            fc_solve_kaz_tree_lookup(cache->kaz_tree, lowest_pri));
#endif

        cache->lowest_pri = lowest_pri->higher_pri;
        cache->lowest_pri->lower_pri = NULL;

        NEXT_CACHE_STATE(lowest_pri) = cache->recycle_bin;

        cache->recycle_bin = lowest_pri;
        --count;
    }

    cache->count_elements_in_cache = count;

    return &(new_cache_state->key);
}

#undef NEXT_CACHE_STATE
#endif

#ifdef DEBUG
#define TRACE0(message)                                                        \
    fcs_trace("BestFS - %s ; Iters=%ld.\n", message,                           \
        (long)(*instance_num_checked_states_ptr))
#else
#define TRACE0(no_use)
#endif

#define my_brfs_queue (BRFS_VAR(soft_thread, bfs_queue))
#define my_brfs_queue_last_item (BRFS_VAR(soft_thread, bfs_queue_last_item))
#define my_brfs_recycle_bin (BRFS_VAR(soft_thread, recycle_bin))

#define NEW_BRFS_QUEUE_ITEM()                                                  \
    ((fcs_states_linked_list_item *)fcs_compact_alloc_ptr(                     \
        &(HT_FIELD(hard_thread, allocator)),                                   \
        sizeof(fcs_states_linked_list_item)))

static inline void fc_solve_initialize_bfs_queue(
    fcs_soft_thread *const soft_thread)
{
    fcs_hard_thread *const hard_thread = soft_thread->hard_thread;

    // Initialize the BFS queue. It has one dummy element at the beginning,
    // and my_brfs_queue_last_item points at a tail sentinel into which the
    // next enqueued state will be written; both simplify the queue
    // operations.
    my_brfs_queue = NEW_BRFS_QUEUE_ITEM();
    my_brfs_queue_last_item = my_brfs_queue->next = NEW_BRFS_QUEUE_ITEM();
    my_brfs_queue_last_item->next = NULL;
    my_brfs_recycle_bin = NULL;
}

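// Prepare a soft thread for a BeFS or BFS scan: set up the priority queue
// and rater (for BeFS) or the linked-list queue (for BFS), flatten the
// configured moves order into a single moves list, and seed the scan with
// the instance's initial state.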
void fc_solve_soft_thread_init_befs_or_bfs(fcs_soft_thread *const soft_thread)
{
    if (soft_thread->is_befs)
    {
#define WEIGHTING(soft_thread) (&(BEFS_VAR(soft_thread, weighting)))
        // Initialize the priority queue of the BeFS scan
        fc_solve_pq_init(&(BEFS_VAR(soft_thread, pqueue)));
        fc_solve_initialize_befs_rater(soft_thread, WEIGHTING(soft_thread));
    }
    else
    {
        fc_solve_initialize_bfs_queue(soft_thread);
    }
#ifndef FCS_ZERO_FREECELLS_MODE
    if (!BEFS_M_VAR(soft_thread, moves_list))
    {
        size_t num = 0;
        fcs_move_func *moves_list = NULL;

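        // Only the first by-depth moves order is used here; the BeFS and
        // BFS scans do not switch moves orders by depth.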
        for (size_t group_idx = 0;
             group_idx < soft_thread->by_depth_moves_order.by_depth_moves[0]
                             .moves_order.num;
             ++group_idx)
        {
            add_to_move_funcs_list(&moves_list, &num,
                soft_thread->by_depth_moves_order.by_depth_moves[0]
                    .moves_order.groups[group_idx]
                    .move_funcs,
                soft_thread->by_depth_moves_order.by_depth_moves[0]
                    .moves_order.groups[group_idx]
                    .num);
        }
        BEFS_M_VAR(soft_thread, moves_list) = moves_list;
        BEFS_M_VAR(soft_thread, moves_list_end) = moves_list + num;
    }
#endif

    BEFS_M_VAR(soft_thread, first_state_to_check) =
        FCS_STATE_keyval_pair_to_collectible(
            &fcs_st_instance(soft_thread)->state_copy);
}

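// Insert the derived states into the relevant collection: for BeFS, push
// each one onto the priority queue keyed by its heuristic rating; for BFS,
// append it at the tail of the linked-list queue, reusing items from the
// recycle bin when possible.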
static inline void befs__insert_derived_states(
    fcs_soft_thread *const soft_thread, fcs_hard_thread *const hard_thread,
    fcs_instance *instance GCC_UNUSED, const bool is_befs,
    fcs_derived_states_list derived, pri_queue *const pqueue,
    fcs_states_linked_list_item **queue_last_item)
{
    fcs_derived_states_list_item *derived_iter, *derived_end;
    for (derived_end = (derived_iter = derived.states) + derived.num_states;
         derived_iter < derived_end; derived_iter++)
    {
        const_AUTO(scans_ptr_new_state, derived_iter->state_ptr);
        if (is_befs)
        {
#ifdef FCS_RCS_STATES
            fcs_kv_state new_pass = {.key = fc_solve_lookup_state_key_from_val(
                                         instance, scans_ptr_new_state),
                .val = scans_ptr_new_state};
#else
            fcs_kv_state new_pass =
                FCS_STATE_keyval_pair_to_kv(scans_ptr_new_state);
#endif
            fc_solve_pq_push(pqueue, scans_ptr_new_state,
                befs_rate_state(soft_thread, WEIGHTING(soft_thread),
                    new_pass.key, BEFS_MAX_DEPTH - kv_calc_depth(&(new_pass))));
        }
        else
        {
            // Enqueue the new state.
            fcs_states_linked_list_item *last_item_next;

            if (my_brfs_recycle_bin)
            {
                last_item_next = my_brfs_recycle_bin;
                my_brfs_recycle_bin = my_brfs_recycle_bin->next;
            }
            else
            {
                last_item_next = NEW_BRFS_QUEUE_ITEM();
            }

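            // Write the state into the current tail sentinel and append a
            // fresh sentinel after it.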
            queue_last_item[0]->next = last_item_next;

            queue_last_item[0]->s = scans_ptr_new_state;
            last_item_next->next = NULL;
            queue_last_item[0] = last_item_next;
        }
    }
}

// fc_solve_befs_or_bfs_do_solve() is the main event loop of the BeFS and
// BFS scans. It is quite simple: all it does is extract elements out of
// the queue or priority queue and run all the moves on them.
//
// It goes on in this fashion until the final state is reached or there are
// no more states in the queue.
fc_solve_solve_process_ret_t fc_solve_befs_or_bfs_do_solve(
    fcs_soft_thread *const soft_thread)
{
    fcs_hard_thread *const hard_thread = soft_thread->hard_thread;
    fcs_instance *const instance = HT_INSTANCE(hard_thread);

#if defined(FCS_WITH_DEPTH_FIELD) &&                                           \
    !defined(FCS_HARD_CODE_CALC_REAL_DEPTH_AS_FALSE)
    const bool calc_real_depth = fcs_get_calc_real_depth(instance);
#endif
#ifndef FCS_HARD_CODE_SCANS_SYNERGY_AS_TRUE
    const bool scans_synergy =
        STRUCT_QUERY_FLAG(instance, FCS_RUNTIME_SCANS_SYNERGY);
#endif
    const_AUTO(soft_thread_id, soft_thread->id);
    const bool is_a_complete_scan =
        STRUCT_QUERY_FLAG(soft_thread, FCS_SOFT_THREAD_IS_A_COMPLETE_SCAN);
#ifndef FCS_DISABLE_NUM_STORED_STATES
    const_SLOT(effective_max_num_states_in_collection, instance);
#endif

    fc_solve_solve_process_ret_t error_code;
    fcs_derived_states_list derived = {
        .num_states = 0, .max_num_states = 0, .states = NULL};

    const fcs_move_func *const moves_list = BEFS_M_VAR(soft_thread, moves_list);
    const fcs_move_func *const moves_list_end =
        BEFS_M_VAR(soft_thread, moves_list_end);

    DECLARE_STATE();
    PTR_STATE = BEFS_M_VAR(soft_thread, first_state_to_check);
    FCS_ASSIGN_STATE_KEY();
#ifndef FCS_ENABLE_PRUNE__R_TF__UNCOND
    const bool enable_pruning = soft_thread->enable_pruning;
#endif

    fcs_iters_int *const instance_num_checked_states_ptr =
        &(instance->i__stats.num_checked_states);
#ifndef FCS_SINGLE_HARD_THREAD
    fcs_iters_int *const hard_thread_num_checked_states_ptr =
        &(HT_FIELD(hard_thread, ht__num_checked_states));
#endif
    const_SLOT(is_befs, soft_thread);
#ifdef FCS_WITH_MOVES
    const_SLOT(is_optimize_scan, soft_thread);
#endif

    pri_queue *const pqueue = &(BEFS_VAR(soft_thread, pqueue));
    fcs_states_linked_list_item *queue = my_brfs_queue;
    fcs_states_linked_list_item *queue_last_item = my_brfs_queue_last_item;
    FC__STACKS__SET_PARAMS();
    const_AUTO(max_num_states, calc_ht_max_num_states(instance, hard_thread));
#ifndef FCS_WITHOUT_ITER_HANDLER
    const_SLOT(debug_iter_output_func, instance);
    const_SLOT(debug_iter_output_context, instance);
#endif

    int8_t *const befs_positions_by_rank =
        (BEFS_M_VAR(soft_thread, befs_positions_by_rank));

    while (PTR_STATE != NULL)
    {
        TRACE0("Start of loop");
        // If we did the pruning after checking for being visited, there
        // would be a risk of an inconsistent result when the scan is
        // interrupted, because we would check the pruned state once (after
        // the scan was suspended) and the unpruned state another time.
        //
        // Therefore, we prune before checking the visited flags.
        if (fcs__should_state_be_pruned(enable_pruning, PTR_STATE))
        {
            fcs_collectible_state *const after_pruning_state =
                fc_solve_sfs_raymond_prune(soft_thread, pass);
            if (after_pruning_state)
            {
                ASSIGN_ptr_state(after_pruning_state);
            }
        }

        register const int temp_visited = FCS_S_VISITED(PTR_STATE);

        // If this is an optimization scan and the state being checked is
        // not in the original solution path, move on to the next state.
        // Likewise, if the state has already been visited, move on to the
        // next state.
        if (
#ifdef FCS_WITH_MOVES
            is_optimize_scan
                ? ((!(temp_visited & FCS_VISITED_IN_SOLUTION_PATH)) ||
                      (temp_visited & FCS_VISITED_IN_OPTIMIZED_PATH))
                :
#endif
                ((temp_visited & FCS_VISITED_DEAD_END) ||
                    (is_scan_visited(PTR_STATE, soft_thread_id))))
        {
            goto next_state;
        }

        TRACE0("Counting cells");
        if (check_if_limits_exceeded())
        {
            BEFS_M_VAR(soft_thread, first_state_to_check) = PTR_STATE;
            TRACE0("error_code - FCS_STATE_SUSPEND_PROCESS");
            error_code = FCS_STATE_SUSPEND_PROCESS;
            goto my_return_label;
        }

#ifndef FCS_WITHOUT_ITER_HANDLER
        TRACE0("debug_iter_output");
        if (debug_iter_output_func)
        {
            debug_iter_output_func(debug_iter_output_context,
                (fcs_int_limit_t)(*instance_num_checked_states_ptr),
                calc_depth(PTR_STATE), (void *)instance, &pass,
#ifdef FCS_WITHOUT_VISITED_ITER
                0
#else
                ((FCS_S_PARENT(PTR_STATE) == NULL)
                        ? 0
                        : (fcs_int_limit_t)FCS_S_VISITED_ITER(
                              FCS_S_PARENT(PTR_STATE)))
#endif
            );
        }
#endif

        const fcs_game_limit num_vacant_freecells = count_num_vacant_freecells(
            LOCAL_FREECELLS_NUM, &FCS_SCANS_the_state);
        const fcs_game_limit num_vacant_stacks =
            count_num_vacant_stacks(LOCAL_STACKS_NUM, &FCS_SCANS_the_state);
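        // If all the stacks and freecells are vacant, then every card has
        // been moved to the foundations, and the game is solved.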
        if ((num_vacant_stacks == LOCAL_STACKS_NUM) &&
            (num_vacant_freecells == LOCAL_FREECELLS_NUM))
        {
            BUMP_NUM_CHECKED_STATES();
            error_code = FCS_STATE_WAS_SOLVED;
            goto my_return_label;
        }

        calculate_real_depth(calc_real_depth, PTR_STATE);

        soft_thread->num_vacant_freecells = num_vacant_freecells;
        soft_thread->num_vacant_stacks = num_vacant_stacks;
        fc_solve__calc_positions_by_rank_data(
            soft_thread, &FCS_SCANS_the_state, befs_positions_by_rank);

        TRACE0("perform_moves");
        // Do all the tests in one go, because that is the way it should be
        // done for BFS and BeFS.
        derived.num_states = 0;
        for (const fcs_move_func *move_func_ptr = moves_list;
             move_func_ptr < moves_list_end; move_func_ptr++)
        {
            move_func_ptr->f(soft_thread, pass, &derived);
        }

        if (is_a_complete_scan)
        {
            FCS_S_VISITED(PTR_STATE) |= FCS_VISITED_ALL_TESTS_DONE;
        }

        BUMP_NUM_CHECKED_STATES();
        TRACE0("Insert all states");
        befs__insert_derived_states(soft_thread, hard_thread, instance, is_befs,
            derived, pqueue, &queue_last_item);
#ifdef FCS_WITH_MOVES
        if (is_optimize_scan)
        {
            FCS_S_VISITED(PTR_STATE) |= FCS_VISITED_IN_OPTIMIZED_PATH;
        }
        else
#endif
        {
            set_scan_visited(PTR_STATE, soft_thread_id);

            if (derived.num_states == 0)
            {
                if (is_a_complete_scan)
                {
                    MARK_AS_DEAD_END(PTR_STATE);
                }
            }
        }

#ifndef FCS_WITHOUT_VISITED_ITER
        FCS_S_VISITED_ITER(PTR_STATE) = (*instance_num_checked_states_ptr) - 1;
#endif

    next_state:
        TRACE0("Label next state");
        fcs_collectible_state *new_ptr_state;
        if (is_befs)
        {
            // It is a BeFS scan
            fc_solve_pq_pop(pqueue, &(new_ptr_state));
        }
        else
        {
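            // BFS: dequeue the item right after the dummy head;
            // queue_last_item is the tail sentinel, so reaching it means
            // the queue is empty.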
            const_AUTO(save_item, queue->next);
            if (save_item != queue_last_item)
            {
                new_ptr_state = save_item->s;
                queue->next = save_item->next;
                save_item->next = my_brfs_recycle_bin;
                my_brfs_recycle_bin = save_item;
            }
            else
            {
                new_ptr_state = NULL;
            }
        }
        ASSIGN_ptr_state(new_ptr_state);
    }

    error_code = FCS_STATE_IS_NOT_SOLVEABLE;
my_return_label:
    FCS_SET_final_state();
    if (derived.states != NULL)
    {
        free(derived.states);
    }
    my_brfs_queue_last_item = queue_last_item;

    return error_code;
}

// These functions are used by the move functions in freecell.c and
// simpsim.c.
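//
// fc_solve_sfs_check_state_begin() allocates a new state - reusing one from
// the instance's list of vacant states when available, and otherwise
// allocating from the hard thread's compact allocator - copies the parent
// state into it, and resets its BeFS/BFS bookkeeping fields.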
int fc_solve_sfs_check_state_begin(fcs_hard_thread *const hard_thread,
    fcs_kv_state *const out_new_state_out,
    fcs_kv_state raw_state_raw SFS__PASS_MOVE_STACK(
        fcs_move_stack *const moves))
{
    fcs_collectible_state *raw_ptr_new_state;
    fcs_instance *const instance = HT_INSTANCE(hard_thread);

    if ((HT_FIELD(hard_thread, allocated_from_list) =
                (instance->list_of_vacant_states != NULL)))
    {
        raw_ptr_new_state = instance->list_of_vacant_states;
        instance->list_of_vacant_states =
            FCS_S_NEXT(instance->list_of_vacant_states);
    }
    else
    {
        raw_ptr_new_state =
            fcs_state_ia_alloc_into_var(&(HT_FIELD(hard_thread, allocator)));
    }

    FCS_STATE_collectible_to_kv(out_new_state_out, raw_ptr_new_state);
    fcs_duplicate_kv_state(out_new_state_out, &raw_state_raw);
#ifdef FCS_RCS_STATES
#define INFO_STATE_PTR(kv_ptr) ((kv_ptr)->val)
#else
// TODO : That's very hacky - get rid of it.
#define INFO_STATE_PTR(kv_ptr) ((fcs_state_keyval_pair *)((kv_ptr)->key))
#endif
    // Some BeFS and BFS parameters that need to be initialized in
    // the derived state.
    FCS_S_PARENT(raw_ptr_new_state) = INFO_STATE_PTR(&raw_state_raw);
#ifdef FCS_WITH_MOVES
    FCS_S_MOVES_TO_PARENT(raw_ptr_new_state) = moves;
#endif
// Make sure depth is consistent with the game graph.
// I.e: the depth of every newly discovered state is derived from
// the state from which it was discovered.
#ifdef FCS_WITH_DEPTH_FIELD
    (FCS_S_DEPTH(raw_ptr_new_state))++;
#endif
    // Mark this state as a state that was not yet visited
    FCS_S_VISITED(raw_ptr_new_state) = 0;
    // It's a newly created state which does not have children yet.
    FCS_S_NUM_ACTIVE_CHILDREN(raw_ptr_new_state) = 0;
    memset(&(FCS_S_SCAN_VISITED(raw_ptr_new_state)), '\0',
        sizeof(FCS_S_SCAN_VISITED(raw_ptr_new_state)));
    fcs_move_stack_reset(moves);

    return 0;
}

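// fc_solve_sfs_check_state_end() tries to add the new state to the states
// collection. If the state is already there, the duplicate is released back
// to the vacant list or to the allocator, the existing state may be
// re-parented (see below), and the existing state is returned; otherwise
// the newly added state is returned.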
extern fcs_collectible_state *fc_solve_sfs_check_state_end(
    fcs_soft_thread *const soft_thread,
#ifndef FCS_HARD_CODE_REPARENT_STATES_AS_FALSE
    fcs_kv_state raw_state_raw,
#endif
    fcs_kv_state *const raw_ptr_new_state_raw FCS__pass_moves(
        fcs_move_stack *const moves GCC_UNUSED))
{
    const_SLOT(hard_thread, soft_thread);
    const_AUTO(instance, HT_INSTANCE(hard_thread));
#if defined(FCS_WITH_DEPTH_FIELD) &&                                           \
    !defined(FCS_HARD_CODE_CALC_REAL_DEPTH_AS_FALSE)
    const bool calc_real_depth = fcs_get_calc_real_depth(instance);
#endif
#if !defined(FCS_HARD_CODE_REPARENT_STATES_AS_FALSE) &&                        \
    !defined(FCS_HARD_CODE_SCANS_SYNERGY_AS_TRUE)
    const bool scans_synergy =
        STRUCT_QUERY_FLAG(instance, FCS_RUNTIME_SCANS_SYNERGY);
#endif
    fcs_kv_state existing_state;

#define ptr_new_state_foo (raw_ptr_new_state_raw->val)

    if (!fc_solve_check_and_add_state(
            hard_thread, raw_ptr_new_state_raw, &existing_state))
    {
        if (HT_FIELD(hard_thread, allocated_from_list))
        {
            ptr_new_state_foo->parent = instance->list_of_vacant_states;
            instance->list_of_vacant_states =
                INFO_STATE_PTR(raw_ptr_new_state_raw);
        }
        else
        {
            fcs_compact_alloc_release(&(HT_FIELD(hard_thread, allocator)));
        }

#ifdef FCS_WITH_DEPTH_FIELD
        calculate_real_depth(
            calc_real_depth, FCS_STATE_kv_to_collectible(&existing_state));
#endif

        // Re-parent the existing state to this one: if the depth that the
        // existing state would have when reached from this state is lower
        // than the depth it already has, re-assign its parent to this
        // state.
#ifndef FCS_HARD_CODE_REPARENT_STATES_AS_FALSE
#define ptr_state (raw_state_raw.val)
        if (STRUCT_QUERY_FLAG(instance, FCS_RUNTIME_TO_REPARENT_STATES_REAL) &&
            (kv_calc_depth(&existing_state) >
                kv_calc_depth(&raw_state_raw) + 1))
        {
#ifdef FCS_WITH_MOVES
            // Make a copy of "moves" because "moves" will be destroyed
            existing_state.val->moves_to_parent =
                fc_solve_move_stack_compact_allocate(hard_thread, moves);
#endif
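            // Move the child from the old parent's active-children count
            // to the new parent's; if the old parent is left with no
            // active children, it becomes a dead end.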
            if (!(existing_state.val->visited & FCS_VISITED_DEAD_END))
            {
                if ((--(FCS_S_NUM_ACTIVE_CHILDREN(
                        existing_state.val->parent))) == 0)
                {
                    MARK_AS_DEAD_END(existing_state.val->parent);
                }
                ptr_state->num_active_children++;
            }
            existing_state.val->parent = INFO_STATE_PTR(&raw_state_raw);
#ifdef FCS_WITH_DEPTH_FIELD
            existing_state.val->depth = ptr_state->depth + 1;
#endif
        }
#endif
        return FCS_STATE_kv_to_collectible(&existing_state);
    }
    else
    {
        return INFO_STATE_PTR(raw_ptr_new_state_raw);
    }
}