/*===========================================================================*/
/*                                                                           */
/* This file is part of the SYMPHONY MILP Solver Framework.                  */
/*                                                                           */
/* SYMPHONY was jointly developed by Ted Ralphs (ted@lehigh.edu) and         */
/* Laci Ladanyi (ladanyi@us.ibm.com).                                        */
/*                                                                           */
/* (c) Copyright 2000-2019 Ted Ralphs. All Rights Reserved.                  */
/*                                                                           */
/* This software is licensed under the Eclipse Public License. Please see    */
/* accompanying file for terms.                                              */
/*                                                                           */
/*===========================================================================*/

#include <memory.h>
#include <math.h>
#include <string.h>

#include "sym_lp.h"
#include "sym_constants.h"
#include "sym_macros.h"
#include "sym_types.h"

/*===========================================================================*/

/*===========================================================================*\
 * This file contains LP functions related to row operations.
\*===========================================================================*/

int check_row_effectiveness(lp_prob *p)
{
   int ineff_cnt_to_delete = p->par.ineff_cnt_to_delete;
   int orig_eff = p->par.base_constraints_always_effective;
   LPdata *lp_data = p->lp_data;
   //double *dualsol = lp_data->dualsol;
   double lpetol = lp_data->lpetol;
   double lpetol10 = 10*lpetol;
   row_data *row, *rows = lp_data->rows;
   int m = lp_data->m;

   int bcutnum = p->base.cutnum;

   double slack, *slacks = lp_data->slacks;
   int *free_rows;
   int ineffective, deletable, violated, i, j, k;

   char *stat;
   int  *now_ineff, *outrhsind, *inrhsind, *slackstat;

   stat = lp_data->tmp.c; /* m */

   /* slacks is already filled up. We got the slacks before calling
    * fix_variables. Now, based on its slack value, mark each row as
    * violated, loose or tight. */
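   /* A note on the classification below (a reading of the code, assuming
    * the slacks array holds rhs minus row activity, which is the convention
    * the 'L' and 'G' branches imply): an 'L' row (ax <= b) has nonnegative
    * slack when satisfied, a 'G' row (ax >= b) nonpositive slack. "Loose"
    * then means the slack is away from zero on the feasible side, "tight"
    * means it is within 10*lpetol of a bound, and "violated" means it lies
    * beyond the tolerance on the infeasible side. For an 'R' row the
    * feasible slack lies between 0 and the cut's (possibly negative) range,
    * so both endpoints are checked. */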

   //int base_m = orig_eff ? bcutnum : 0;

   for (i = m - 1; i >= 0; i--){
      slack = slacks[i];
      switch (rows[i].cut->sense){
       case 'E':
         if (slack < -lpetol10 || slack > lpetol10) stat[i] = VIOLATED_ROW;
         else                                       stat[i] = TIGHT_ROW;
         break;
       case 'L':
         if (slack > lpetol10)       stat[i] = SLACK_ROW;
         else if (slack > -lpetol10) stat[i] = TIGHT_ROW;
         else                        stat[i] = VIOLATED_ROW;
         break;
       case 'G':
         if (slack < -lpetol10)     stat[i] = SLACK_ROW;
         else if (slack < lpetol10) stat[i] = TIGHT_ROW;
         else                       stat[i] = VIOLATED_ROW;
         break;
       case 'R':
         if (rows[i].cut->range < 0){
            if (slack > rows[i].cut->range + lpetol10 && slack < -lpetol10)
               stat[i] = SLACK_ROW;
            else if (slack > rows[i].cut->range - lpetol10 && slack < lpetol10)
               stat[i] = TIGHT_ROW;
            else
               stat[i] = VIOLATED_ROW;
         }else{
            if (slack < rows[i].cut->range - lpetol10 && slack > lpetol10)
               stat[i] = SLACK_ROW;
            else if (slack < rows[i].cut->range + lpetol10 && slack > -lpetol10)
               stat[i] = TIGHT_ROW;
            else
               stat[i] = VIOLATED_ROW;
         }
         break;
      }
   }

   /* Now set the branch values appropriately */
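   /* Rows that have gone slack lose their "allowed to branch on" status and
    * rows that are binding again become allowed; assuming
    * SWITCH_CANDIDATE_ALLOWED is the combined mask of CANDIDATE_FOR_BRANCH
    * and ALLOWED_TO_BRANCH_ON, the XOR below flips a cut between those two
    * states. */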
   if (p->par.branch_on_cuts){
      for (i=m-1; i>=0; i--){
         if (stat[i] == SLACK_ROW){
            if ((rows[i].cut->branch & ALLOWED_TO_BRANCH_ON))
               rows[i].cut->branch ^= SWITCH_CANDIDATE_ALLOWED;
         }else{
            if ((rows[i].cut->branch & CANDIDATE_FOR_BRANCH))
               rows[i].cut->branch ^= SWITCH_CANDIDATE_ALLOWED;
         }
      }
   }

   /*========================================================================*\
    * A few words of wisdom:
    *
    * If a row wasn't free then everything is nice.
    * If it was free then there are complications because its slack variable
    * will be in the basis and the corresponding dual variable is 0.
    *
    * Keep in mind the objective: if a row is violated, make it constraining;
    * if it is tight and free, keep it free; if it is slack, make it free.
    *
    * Also, base constraints and branching cuts may be ineffective, but they
    * are not deletable. So be careful!
   \*========================================================================*/

   violated = ineffective = 0;

   /* we'll first use slackstat then outrhsind. no conflict */
   slackstat = outrhsind = lp_data->tmp.i1;
   inrhsind = outrhsind + m;
   now_ineff = inrhsind + m;
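   /* lp_data->tmp.i1 is carved into three consecutive blocks of m integers:
    * the first doubles as slackstat/outrhsind, the second holds inrhsind,
    * and the third holds now_ineff. */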
   if (p->par.ineffective_constraints != NO_CONSTRAINT_IS_INEFFECTIVE){
      /* Deal with the violated rows */
      for (i = orig_eff ? bcutnum : 0; i < m; i++){
         if (stat[i] == VIOLATED_ROW){ /* must have been free */
            rows[i].free = FALSE;
            rows[i].eff_cnt = 0;
            rows[i].ineff_cnt = 0;
            inrhsind[violated++] = i;
         }
      }
      /* Collect the rows that are deemed ineffective now */
      switch (p->par.ineffective_constraints){
       case NONZERO_SLACKS_ARE_INEFFECTIVE:
         for (i = orig_eff ? bcutnum : 0; i < m; i++){
            if (stat[i] == SLACK_ROW ||
                (stat[i] == TIGHT_ROW && rows[i].free == TRUE)){
               now_ineff[ineffective++] = i;
            }else{
               rows[i].eff_cnt++;
            }
         }
         break;
       case BASIC_SLACKS_ARE_INEFFECTIVE:
         /* for violated free rows the slack is in basis! */
         get_basis(lp_data, NULL, slackstat);
         for (i = orig_eff ? bcutnum : 0; i < m; i++){
            if (slackstat[i] == SLACK_BASIC && stat[i] != VIOLATED_ROW){
               now_ineff[ineffective++] = i;
            }else{
               rows[i].eff_cnt++;
            }
         }
         break;
       case ZERO_DUAL_VALUES_ARE_INEFFECTIVE:
         for (i = orig_eff ? bcutnum : 0; i < m; i++){
            if (fabs(lp_data->dualsol[i]) < lpetol && stat[i] != VIOLATED_ROW){
               now_ineff[ineffective++] = i;
            }else{
               rows[i].eff_cnt++;
            }
         }
         break;
      }
      /* Now violated rows have eff_cnt = 1 (not that it matters...) */
   }

   deletable = k = 0;
   for (j = ineffective - 1; j >= 0; j--){

      row = rows + (i = now_ineff[j]);

      //if(p->bc_level > 100 && !(row->deletable))row->deletable = TRUE;

      if (!row->free && row->deletable){
         row->free = TRUE;
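         /* A row that was tight (but free) starts a fresh ineffectiveness
          * count; a slack row gets a huge count (MAXINT >> 1) so that it
          * exceeds ineff_cnt_to_delete immediately and becomes deletable. */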
         row->ineff_cnt = stat[i] == TIGHT_ROW ? 0 : ((MAXINT) >> 1);
         outrhsind[k++] = i;
      }
      row->ineff_cnt++;
      if (i >= bcutnum && ! (row->cut->branch & CUT_BRANCHED_ON) &&
          row->deletable && row->ineff_cnt >= ineff_cnt_to_delete)
         deletable++;
   }

   /* stat is not used any more, so its location can be reused by
      constrain_row_set and free_row_set; any integer tmp arrays they need
      must use the area beyond in/outrhsind. That area was used for
      now_ineff, but we don't need that anymore either. */

   if (violated > 0)
      constrain_row_set(lp_data, violated, inrhsind);

   if (k > 0)
      free_row_set(lp_data, k, outrhsind);

   PRINT(p->par.verbosity, 3,
         ("Row effectiveness: rownum: %i ineff: %i deletable: %i\n",
          m, ineffective, deletable));
   if (p->par.verbosity > 6 && ineffective){
      printf("   Ineffective row(s):");
      for (i=0; i<m; i++){
         if (rows[i].free)
            printf(" %i", i);
      }
      printf("\n");
   }

   /*------------------------------------------------------------------------*\
    * Finally, remove the deletable rows if there are enough to remove
   \*------------------------------------------------------------------------*/

   if (deletable > p->par.mat_row_compress_ratio * m &&
       deletable > p->par.mat_row_compress_num){
      PRINT(p->par.verbosity, 3, ("   Removing deletable rows ...\n"));
      if (p->par.branch_on_cuts)
         p->slack_cuts = (cut_data **) realloc(p->slack_cuts,
                          (p->slack_cut_num + deletable) * sizeof(cut_data *));

      free_rows = lp_data->tmp.i1;
      if (bcutnum > 0)
         memset(free_rows, FALSE, bcutnum * ISIZE);
      /* remember, by now every ineffective row is free and ineff_cnt is
         positive only for free rows */
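      /* free_rows[i] == TRUE marks row i for removal by delete_rows(); the
       * surviving row_data entries are shifted down (index k) so that the
       * rows array stays parallel to the rows left in the LP. */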
      for (k = i = bcutnum; i < m; i++){
         row = rows + i;
         if (row->free && ! (row->cut->branch & CUT_BRANCHED_ON) &&
             row->ineff_cnt >= ineff_cnt_to_delete){
            free_rows[i] = TRUE;
            if (row->cut->branch & CANDIDATE_FOR_BRANCH){
#ifdef DO_TESTS
               if (!p->par.branch_on_cuts)
                  printf("No branch_on_cuts but a CANDIDATE_FOR_BRANCH!\n\n");
#endif
               p->slack_cuts[p->slack_cut_num++] = row->cut;
               row->cut = NULL;
            }else{
#ifdef COMPILE_IN_LP /* we don't want to free rows that have a name if we are
                        using shared memory because they are still being used */
               if (row->cut->name < 0)
#endif
                  free_cut(&(row->cut));
            }
         }else{
            free_rows[i] = FALSE;
            rows[k++] = rows[i];
         }
      }
      delete_rows(lp_data, deletable, free_rows);
      p->lp_stat.cuts_deleted_from_lps += deletable;
      if (p->bc_level > 0) {
         p->lp_stat.num_cuts_slacked_out_in_path += deletable;
      }
   }
   PRINT(p->par.verbosity, 3, ("\n"));

   return(violated);
}

/*===========================================================================*/

void add_row_set(lp_prob *p, waiting_row **wrows, int length)
{
   int i;
   row_data *row;
   add_waiting_rows(p, wrows, length);
   row = p->lp_data->rows + (p->lp_data->m - length);

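   /* Transfer ownership of each cut from the waiting row to the row_data
    * entry; the waiting row's cut pointer is NULLed so that
    * free_waiting_rows() below does not free the cut we just installed. */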
   for (i=0; i<length; i++, row++){
      row->free = FALSE;
      row->cut = wrows[i]->cut;
      row->eff_cnt = 1;
      row->deletable = wrows[i]->cut->deletable;
      wrows[i]->cut = NULL;
   }
   free_waiting_rows(wrows, length);
}

/*===========================================================================*/

void add_new_rows_to_waiting_rows(lp_prob *p, waiting_row **new_rows,
                                  int new_row_num)
{
   new_row_num = compute_violations(p, new_row_num, new_rows);

   if (new_row_num > 0){
      /* check to be sure there is enough room in the row set data
       * structure for the new rows -- otherwise reallocate memory */
      REALLOC(p->waiting_rows, waiting_row *, p->waiting_rows_size,
              p->waiting_row_num + new_row_num, BB_BUNCH);
      memcpy((p->waiting_rows + p->waiting_row_num), new_rows,
             new_row_num * sizeof(waiting_row *));
      p->waiting_row_num += new_row_num;
   }
}

/*===========================================================================*/

/*===========================================================================*\
 * This function orders the waiting rows so that the cuts sent by the same
 * process are grouped together, while otherwise preserving the order of
 * arrival. The ordering is done in ascending order with respect to the
 * source_pid field of each waiting_row. Newly arriving cuts have the
 * correct value here; cuts already in the local pool get MAXINT, so they
 * are considered last.
 *
 * NOTE: This ensures that results are reproducible, even with multiple
 * cut pools/generators, as long as we never time out.
\*===========================================================================*/

void order_waiting_rows_based_on_sender(lp_prob *p)
{
   waiting_row **wrows = p->waiting_rows;
   waiting_row *wtmp;
   const int wrownum = p->waiting_row_num;
   int i, j;
   /* Do a simple (stable) insertion sort */
   for (i = 1; i < wrownum; ++i) {
      wtmp = wrows[i];
      for (j = i - 1; j >= 0; --j) {
         if (wtmp->source_pid >= wrows[j]->source_pid) {
            break;
         } else {
            wrows[j+1] = wrows[j];
         }
      }
      wrows[j+1] = wtmp;
   }
}

/*===========================================================================*/

/*===========================================================================*\
 * Order the cuts in waiting_rows based on their violation, then pick the
 * best k and add those to the problem.
\*===========================================================================*/

int add_best_waiting_rows(lp_prob *p)
{
   int i, added_rows;
   row_data *rows;
   int max_cut_num_per_iter = (p->bc_level<1) ? p->par.max_cut_num_per_iter_root :
                                                p->par.max_cut_num_per_iter;

   added_rows = MIN(max_cut_num_per_iter, p->waiting_row_num);
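   /* Sort only if we cannot add every waiting row; waiting_row_comp orders
    * the rows by decreasing violation, so the most violated rows are the
    * ones added. */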
   if (added_rows < p->waiting_row_num)
      qsort((char *)p->waiting_rows, p->waiting_row_num,
            sizeof(waiting_row *), waiting_row_comp);
   if (added_rows){
      print_stat_on_cuts_added_u(p, added_rows);
      add_row_set(p, p->waiting_rows, added_rows);
      rows = p->lp_data->rows + (p->lp_data->m - added_rows);
      for (i=0; i<added_rows; i++){
         rows[i].eff_cnt = 1;
      }
      if (added_rows < p->waiting_row_num)
         memmove(p->waiting_rows, p->waiting_rows + added_rows,
                 (p->waiting_row_num - added_rows) * sizeof(waiting_row *));
      p->waiting_row_num -= added_rows;
   }
   return(added_rows);
}

/*===========================================================================*/

void add_waiting_rows(lp_prob *p, waiting_row **wrows, int add_row_num)
{
   LPdata *lp_data = p->lp_data;
   char *sense;
   double *rhs, *rmatval;
   int *rmatbeg, *rmatind;
   int i, nzcnt;
   waiting_row *wrow;

   for (nzcnt=0, i=add_row_num-1; i>=0; i--)
      nzcnt += wrows[i]->nzcnt;
   size_lp_arrays(lp_data, TRUE, FALSE, add_row_num, 0, nzcnt);
   sense = lp_data->tmp.c; /* m */
   rhs = lp_data->tmp.d; /* m */
   REMALLOC(lp_data->tmp.dv, double, lp_data->tmp.dv_size, nzcnt,
            5*(int)BB_BUNCH);
   rmatval = lp_data->tmp.dv; /* nzcnt */
   rmatbeg = lp_data->tmp.i1;
   REMALLOC(lp_data->tmp.iv, int, lp_data->tmp.iv_size, nzcnt, 5*(int)BB_BUNCH);
   rmatind = lp_data->tmp.iv;
   *rmatbeg = 0;
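   /* Pack the waiting rows into the usual row-major triplet arrays:
    * rmatbeg[i] gives the offset in rmatind/rmatval where row i's
    * coefficients start, so row i occupies [rmatbeg[i], rmatbeg[i+1]). */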
   for (i = 0; i < add_row_num; i++){
      wrow = wrows[i];
      rhs[i] = wrow->cut->rhs;
      sense[i] = wrow->cut->sense;
      memcpy(rmatind + rmatbeg[i], wrow->matind, wrow->nzcnt * ISIZE);
      memcpy(rmatval + rmatbeg[i], wrow->matval, wrow->nzcnt * DSIZE);
      rmatbeg[i+1] = rmatbeg[i] + wrow->nzcnt;
   }
   add_rows(lp_data, add_row_num, nzcnt, rhs, sense, rmatbeg, rmatind, rmatval);
   for (i = add_row_num - 1; i >= 0; i--){
      if (sense[i] == 'R')
         change_range(lp_data, lp_data->m+i, wrows[i]->cut->range);
   }
}

/*===========================================================================*\
 * This function compares two waiting rows. It is needed for ordering the
 * cuts by degree of violation.
\*===========================================================================*/

int waiting_row_comp(const void *wr0, const void *wr1)
{
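   /* Note the reversed comparison: a more violated row compares as
    * "smaller", so qsort() puts the rows in decreasing order of violation. */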
   double v0 = (*((waiting_row **)wr0))->violation;
   double v1 = (*((waiting_row **)wr1))->violation;
   return(v0 < v1 ? 1 : (v0 > v1 ? -1 : 0));
}

/*===========================================================================*/

int compute_violations(lp_prob *p, int new_row_num, waiting_row **new_rows)
{
   waiting_row *wrow;
   int *matind, i, j;
   double *matval, lpetol = p->lp_data->lpetol, lhs, *x = p->lp_data->x;
   cut_data *cut;

   for (i = 0; i < new_row_num; ){
      wrow = new_rows[i];
      matind = wrow->matind;
      matval = wrow->matval;
      for (lhs=0, j = wrow->nzcnt-1; j>=0; j--)
         lhs += matval[j] * x[matind[j]];
      cut = wrow->cut;
      switch (cut->sense){
       case 'L': wrow->violation = lhs - cut->rhs;       break;
       case 'G': wrow->violation = cut->rhs - lhs;       break;
       case 'E': wrow->violation = fabs(lhs - cut->rhs); break;
       case 'R':
         wrow->violation =
            lhs < cut->rhs ? cut->rhs - lhs : lhs - cut->rhs - cut->range;
         break;
      }
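      /* Rows that are not violated by at least lpetol are freed; the last
       * row is swapped into slot i and i is not advanced, so the swapped-in
       * row is examined on the next pass. */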
      if (wrow->violation < lpetol){
         free_waiting_row(new_rows+i);
         new_rows[i] = new_rows[--new_row_num];
      }else{
         i++;
      }
   }
   return(new_row_num);
}

/*===========================================================================*/

void compress_slack_cuts(lp_prob *p)
{
   int i, snum = p->slack_cut_num;
   cut_data **slack_cuts = p->slack_cuts;

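   /* Compact the slack_cuts array by overwriting each NULL entry with the
    * last entry; the relative order of the remaining cuts is not preserved. */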
   for (i=0; i<snum; ){
      if (slack_cuts[i] == NULL){
         slack_cuts[i] = slack_cuts[--snum];
      }else{
         i++;
      }
   }
   p->slack_cut_num = snum;
}