#@ s*: Label=FastTest
#@ s2: DakotaConfig=HAVE_DOT
#@ s3: DakotaConfig=HAVE_DOT
# DAKOTA INPUT FILE - dakota_illum.in

# This sample Dakota input file optimizes the illumination example
# taken from course notes taught by Prof. Stephen Boyd at Stanford.
#
# The "hero solution" for the illumination problem found by using
# ncsu_direct and then polishing that answer with conmin_frcg is
# <<<<< Best parameters          =
#                       1.0000000000e+00 x1
#                       2.8186100902e-01 x2
#                       0.0000000000e+00 x3
#                       0.0000000000e+00 x4
#                       0.0000000000e+00 x5
#                       7.5621311116e-01 x6
#                       1.0000000000e+00 x7
# <<<<< Best objective function  =
#                       1.0759888860e+00


# Optimizer selection: s0 runs OPT++ quasi-Newton; the commented
# alternatives are enabled per test variant via the trailing #sN tags.
method,
	optpp_q_newton,         			#s0
#	optpp_newton					#s5
#	optpp_pds,              			#s1
#	dot_bfgs,					#s2
#	dot_frcg,					#s3
#	conmin_frcg,					#s4
  	  max_iterations = 50,
	  convergence_tolerance = 1e-4
#	  scaling								#s2,#s3

# Seven design variables (lamp powers), bounded to [0, 1] for the
# gradient-based variants; scaling lines activate only for s2/s3.
variables,
	continuous_design = 7
	  initial_point .5 .5 .5 .5 .5 .5 .5
     	  lower_bounds   0. 0. 0. 0. 0. 0. 0.	#s0,#s2,#s3,#s4,#s5
          upper_bounds   1. 1. 1. 1. 1. 1. 1.	#s0,#s2,#s3,#s4,#s5
          descriptors   'x1' 'x2' 'x3' 'x4' 'x5' 'x6' 'x7'
#	scale_type = 'value'									#s2,#s3
#	scales     = 7 * .5									#s2
#	scales     = 7 * .1									#s3

# Direct (linked-in) interface to the built-in illumination test driver.
interface,
	direct
	  analysis_drivers = 'illumination'

# Single objective; gradient/Hessian availability varies per test variant:
# s1 is derivative-free, s0/s2-s4 use central finite differences, and
# s5 supplies analytic first and second derivatives for full Newton.
responses,
        objective_functions = 1
#	no_gradients					#s1
	numerical_gradients				#s0,#s2,#s3,#s4
	  method_source dakota 				#s0,#s2,#s3,#s4
	  interval_type central 			#s0,#s2,#s3,#s4
	  fd_gradient_step_size = .000001		#s0,#s2,#s3,#s4
	no_hessians					#s0,#s1,#s2,#s3,#s4
#	analytic_gradients				#s5
#	analytic_hessians				#s5
