#@ s*: Label=FastTest
#@ p*: Label=AcceptanceTest
#@ p*: MPIProcs=8
#@ p1: CheckOutput='dakota.out.1'
#@ p2: CheckOutput='dakota.out.1'
#@ p5: CheckOutput='dakota.log.1'
#@ p6: CheckOutput='dakota.out.1'
#@ p9: CheckOutput='dakota.out.1'

# DAKOTA INPUT FILE - dakota_multistart_ie.in

# Demonstrate use of parallel iterators with parallel evaluations, in
# combination with various user overrides of the automatic parallel
# configuration
# 0 - iterator_servers = 2. Yields a ded. master partition.
# 1 - Add iterator_scheduling peer to force a peer partition.
# 2 - evaluation_servers = 2. -> 4 iterator_servers
# 3 - i_servers = 2, e_servers = 2 -> Idle processors.
# 4 - i_servers = 2, e_scheduling master -> 2 evaluation servers of size 3.
# 5 - i_servers = 4, ppi = 2, i_scheduling master. Ignore sched. request.
# 6 - i_servers = 4, ppi = 2, e_servers = 2. Specify everything.
# 7 - e_servers = 3, -> Idle processors
# 8 - Reduced problem size to lower available eval concurrency
# 9 - Reduced problem size + evaluation_scheduling master

environment,
	  top_method_pointer = 'MS'
# Test input file output redirection in parallel
#	  output_file = 'dakota.log'		#p5

method,
	id_method = 'MS'
#	iterator_servers = 2			#p0,#p1,#p3,#p4
#	iterator_servers = 4			#p5,#p6
#	processors_per_iterator 2		#p5,#p6
#	iterator_scheduling master		#p5
#	iterator_scheduling peer		#p1
	multi_start
	  method_pointer = 'PS'
	  starting_points = -1.0
			    -0.5
			     0.5
			     1.0

method,
	id_method = 'PS'
	vector_parameter_study
	  num_steps = 9			#s0,#p0,#p1,#p2,#p3,#p4,#p5,#p6,#p7
#	  num_steps = 2			#p8,#p9
	  step_vector = 0.01

variables,
	continuous_design = 1

interface,
#	evaluation_servers = 2			#p2,#p3,#p6
#	evaluation_servers = 3			#p7
#	processors_per_evaluation = 2
#	evaluation_scheduling master		#p4,#p9
	fork
	  analysis_driver = 'text_book'

responses,
	num_objective_functions = 1
	no_gradients
	no_hessians
67