1 /* CPU mode switching
2    Copyright (C) 1998-2018 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "target.h"
25 #include "rtl.h"
26 #include "cfghooks.h"
27 #include "df.h"
28 #include "memmodel.h"
29 #include "tm_p.h"
30 #include "regs.h"
31 #include "emit-rtl.h"
32 #include "cfgrtl.h"
33 #include "cfganal.h"
34 #include "lcm.h"
35 #include "cfgcleanup.h"
36 #include "tree-pass.h"
37 
38 /* We want target macros for the mode switching code to be able to refer
39    to instruction attribute values.  */
40 #include "insn-attr.h"
41 
42 #ifdef OPTIMIZE_MODE_SWITCHING
43 
44 /* The algorithm for setting the modes consists of scanning the insn list
45    and finding all the insns which require a specific mode.  Each insn gets
46    a unique struct seginfo element.  These structures are inserted into a list
47    for each basic block.  For each entity, there is an array of bb_info over
   the flow graph basic blocks (local var 'bb_info'), which contains a list
   of the mode-requiring insns within that basic block, in the order they
   are encountered.
50 
   For each entity, any basic block WITHOUT any insns requiring a specific
   mode is given a single entry without a mode (each basic block in the
   flow graph must have at least one entry in the segment table).
54 
   The LCM algorithm is then run over the flow graph to determine where to
   place the sets to the highest-priority mode with respect to the first
   insn in any one block.  Any adjustments required to the transparency
   vectors are made, then the next iteration starts for the next-lower
   priority mode, until all modes for each entity have been processed.
60 
61    More details can be found in the code of optimize_mode_switching.  */
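
/* For example, a block whose insns require modes A, A, B (in that order)
   gets two seginfo entries, one for the first insn requiring A and one for
   the first insn requiring B, while a block with no mode requirements gets
   a single entry without a mode.  */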
62 
/* This structure contains the information for each insn which requires
   a specific mode to be set.
   MODE is the mode this insn must be executed in.
   INSN_PTR is the insn to be executed (may be the note that marks the
   beginning of a basic block).
   BBNUM is the flow graph basic block this insn occurs in.
   NEXT is the next insn in the same basic block.
   REGS_LIVE is the set of hard registers live at the insn's position,
   for use when emitting the mode set.  */
70 struct seginfo
71 {
72   int mode;
73   rtx_insn *insn_ptr;
74   int bbnum;
75   struct seginfo *next;
76   HARD_REG_SET regs_live;
77 };
78 
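/* Per-basic-block information for one entity, kept in the local array
   'bb_info' of optimize_mode_switching.
   SEGINFO is the head of this block's list of seginfo structures.
   COMPUTING is the mode in effect at the end of the block, or no mode.
   MODE_OUT and MODE_IN record the single mode available on exit from and
   entry to the block (as derived from the LCM availability sets), or no
   mode if none or more than one mode is available.  */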
79 struct bb_info
80 {
81   struct seginfo *seginfo;
82   int computing;
83   int mode_out;
84   int mode_in;
85 };
86 
87 static struct seginfo * new_seginfo (int, rtx_insn *, int, HARD_REG_SET);
88 static void add_seginfo (struct bb_info *, struct seginfo *);
89 static void reg_dies (rtx, HARD_REG_SET *);
90 static void reg_becomes_live (rtx, const_rtx, void *);
91 
/* Clear mode I from entity J in bitmap B.  */
93 #define clear_mode_bit(b, j, i) \
94        bitmap_clear_bit (b, (j * max_num_modes) + i)
95 
96 /* Test mode I from entity J in bitmap B.  */
97 #define mode_bit_p(b, j, i) \
98        bitmap_bit_p (b, (j * max_num_modes) + i)
99 
/* Set mode I from entity J in bitmap B.  */
101 #define set_mode_bit(b, j, i) \
102        bitmap_set_bit (b, (j * max_num_modes) + i)
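
/* For example, with two entities where entity 0 has three modes and entity 1
   has two, max_num_modes is 3; entity 0's modes occupy bits 0..2 of each
   per-block bitmap and entity 1's modes occupy bits 3..5 (bits beyond an
   entity's own mode count are simply never set).  */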
103 
/* Emit mode-setting sequences on the edges in EDGE_LIST for entity E.
   INFO gives the per-basic-block mode information for E.  Return true
   if any insns were queued for insertion on an edge.  */
106 
107 static bool
108 commit_mode_sets (struct edge_list *edge_list, int e, struct bb_info *info)
109 {
110   bool need_commit = false;
111 
112   for (int ed = NUM_EDGES (edge_list) - 1; ed >= 0; ed--)
113     {
114       edge eg = INDEX_EDGE (edge_list, ed);
115       int mode;
116 
117       if ((mode = (int)(intptr_t)(eg->aux)) != -1)
118 	{
119 	  HARD_REG_SET live_at_edge;
120 	  basic_block src_bb = eg->src;
121 	  int cur_mode = info[src_bb->index].mode_out;
122 	  rtx_insn *mode_set;
123 
124 	  REG_SET_TO_HARD_REG_SET (live_at_edge, df_get_live_out (src_bb));
125 
126 	  rtl_profile_for_edge (eg);
127 	  start_sequence ();
128 
129 	  targetm.mode_switching.emit (e, mode, cur_mode, live_at_edge);
130 
131 	  mode_set = get_insns ();
132 	  end_sequence ();
133 	  default_rtl_profile ();
134 
	  /* Do not bother to insert an empty sequence.  */
136 	  if (mode_set == NULL)
137 	    continue;
138 
139 	  /* We should not get an abnormal edge here.  */
140 	  gcc_assert (! (eg->flags & EDGE_ABNORMAL));
141 
142 	  need_commit = true;
143 	  insert_insn_on_edge (mode_set, eg);
144 	}
145     }
146 
147   return need_commit;
148 }
149 
/* Allocate a new SEGINFO structure, initialized with the MODE, INSN,
   basic block BB and REGS_LIVE parameters.
   INSN may not be a NOTE_INSN_BASIC_BLOCK, unless it is an empty
   basic block; that allows us later to insert instructions in a FIFO-like
   manner.  */
155 
156 static struct seginfo *
157 new_seginfo (int mode, rtx_insn *insn, int bb, HARD_REG_SET regs_live)
158 {
159   struct seginfo *ptr;
160 
161   gcc_assert (!NOTE_INSN_BASIC_BLOCK_P (insn)
162 	      || insn == BB_END (NOTE_BASIC_BLOCK (insn)));
163   ptr = XNEW (struct seginfo);
164   ptr->mode = mode;
165   ptr->insn_ptr = insn;
166   ptr->bbnum = bb;
167   ptr->next = NULL;
168   COPY_HARD_REG_SET (ptr->regs_live, regs_live);
169   return ptr;
170 }
171 
172 /* Add a seginfo element to the end of a list.
173    HEAD is a pointer to the list beginning.
174    INFO is the structure to be linked in.  */
175 
176 static void
177 add_seginfo (struct bb_info *head, struct seginfo *info)
178 {
179   struct seginfo *ptr;
180 
181   if (head->seginfo == NULL)
182     head->seginfo = info;
183   else
184     {
185       ptr = head->seginfo;
186       while (ptr->next != NULL)
187 	ptr = ptr->next;
188       ptr->next = info;
189     }
190 }
191 
192 /* Record in LIVE that register REG died.  */
193 
194 static void
195 reg_dies (rtx reg, HARD_REG_SET *live)
196 {
197   int regno;
198 
199   if (!REG_P (reg))
200     return;
201 
202   regno = REGNO (reg);
203   if (regno < FIRST_PSEUDO_REGISTER)
204     remove_from_hard_reg_set (live, GET_MODE (reg), regno);
205 }
206 
207 /* Record in LIVE that register REG became live.
208    This is called via note_stores.  */
209 
210 static void
211 reg_becomes_live (rtx reg, const_rtx setter ATTRIBUTE_UNUSED, void *live)
212 {
213   int regno;
214 
215   if (GET_CODE (reg) == SUBREG)
216     reg = SUBREG_REG (reg);
217 
218   if (!REG_P (reg))
219     return;
220 
221   regno = REGNO (reg);
222   if (regno < FIRST_PSEUDO_REGISTER)
223     add_to_hard_reg_set ((HARD_REG_SET *) live, GET_MODE (reg), regno);
224 }
225 
/* Split the fallthrough edge to the exit block, so that we can note
   that NORMAL_MODE is required there.  Return the new block if it's
   inserted before the exit block.  Otherwise return null.  */
229 
230 static basic_block
231 create_pre_exit (int n_entities, int *entity_map, const int *num_modes)
232 {
233   edge eg;
234   edge_iterator ei;
235   basic_block pre_exit;
236 
237   /* The only non-call predecessor at this stage is a block with a
238      fallthrough edge; there can be at most one, but there could be
239      none at all, e.g. when exit is called.  */
240   pre_exit = 0;
241   FOR_EACH_EDGE (eg, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
242     if (eg->flags & EDGE_FALLTHRU)
243       {
244 	basic_block src_bb = eg->src;
245 	rtx_insn *last_insn;
246 	rtx ret_reg;
247 
248 	gcc_assert (!pre_exit);
249 	/* If this function returns a value at the end, we have to
250 	   insert the final mode switch before the return value copy
251 	   to its hard register.  */
252 	if (EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) == 1
253 	    && NONJUMP_INSN_P ((last_insn = BB_END (src_bb)))
254 	    && GET_CODE (PATTERN (last_insn)) == USE
255 	    && GET_CODE ((ret_reg = XEXP (PATTERN (last_insn), 0))) == REG)
256 	  {
257 	    int ret_start = REGNO (ret_reg);
258 	    int nregs = REG_NREGS (ret_reg);
259 	    int ret_end = ret_start + nregs;
260 	    bool short_block = false;
261 	    bool multi_reg_return = false;
262 	    bool forced_late_switch = false;
263 	    rtx_insn *before_return_copy;
264 
265 	    do
266 	      {
267 		rtx_insn *return_copy = PREV_INSN (last_insn);
268 		rtx return_copy_pat, copy_reg;
269 		int copy_start, copy_num;
270 		int j;
271 
272 		if (NONDEBUG_INSN_P (return_copy))
273 		  {
274 		    /* When using SJLJ exceptions, the call to the
275 		       unregister function is inserted between the
276 		       clobber of the return value and the copy.
277 		       We do not want to split the block before this
278 		       or any other call; if we have not found the
279 		       copy yet, the copy must have been deleted.  */
280 		    if (CALL_P (return_copy))
281 		      {
282 			short_block = true;
283 			break;
284 		      }
285 		    return_copy_pat = PATTERN (return_copy);
286 		    switch (GET_CODE (return_copy_pat))
287 		      {
288 		      case USE:
289 			/* Skip USEs of multiple return registers.
290 			   __builtin_apply pattern is also handled here.  */
291 			if (GET_CODE (XEXP (return_copy_pat, 0)) == REG
292 			    && (targetm.calls.function_value_regno_p
293 				(REGNO (XEXP (return_copy_pat, 0)))))
294 			  {
295 			    multi_reg_return = true;
296 			    last_insn = return_copy;
297 			    continue;
298 			  }
299 			break;
300 
301 		      case ASM_OPERANDS:
302 			/* Skip barrier insns.  */
303 			if (!MEM_VOLATILE_P (return_copy_pat))
304 			  break;
305 
306 			/* Fall through.  */
307 
308 		      case ASM_INPUT:
309 		      case UNSPEC_VOLATILE:
310 			last_insn = return_copy;
311 			continue;
312 
313 		      default:
314 			break;
315 		      }
316 
317 		    /* If the return register is not (in its entirety)
318 		       likely spilled, the return copy might be
319 		       partially or completely optimized away.  */
320 		    return_copy_pat = single_set (return_copy);
321 		    if (!return_copy_pat)
322 		      {
323 			return_copy_pat = PATTERN (return_copy);
324 			if (GET_CODE (return_copy_pat) != CLOBBER)
325 			  break;
326 			else if (!optimize)
327 			  {
328 			    /* This might be (clobber (reg [<result>]))
329 			       when not optimizing.  Then check if
330 			       the previous insn is the clobber for
331 			       the return register.  */
332 			    copy_reg = SET_DEST (return_copy_pat);
333 			    if (GET_CODE (copy_reg) == REG
334 				&& !HARD_REGISTER_NUM_P (REGNO (copy_reg)))
335 			      {
336 				if (INSN_P (PREV_INSN (return_copy)))
337 				  {
338 				    return_copy = PREV_INSN (return_copy);
339 				    return_copy_pat = PATTERN (return_copy);
340 				    if (GET_CODE (return_copy_pat) != CLOBBER)
341 				      break;
342 				  }
343 			      }
344 			  }
345 		      }
346 		    copy_reg = SET_DEST (return_copy_pat);
347 		    if (GET_CODE (copy_reg) == REG)
348 		      copy_start = REGNO (copy_reg);
349 		    else if (GET_CODE (copy_reg) == SUBREG
350 			     && GET_CODE (SUBREG_REG (copy_reg)) == REG)
351 		      copy_start = REGNO (SUBREG_REG (copy_reg));
352 		    else
353 		      {
			/* When control reaches the end of a non-void
			   function, there are no return copy insns at all.
			   This avoids an ICE on that invalid function.  */
357 			if (ret_start + nregs == ret_end)
358 			  short_block = true;
359 			break;
360 		      }
361 		    if (!targetm.calls.function_value_regno_p (copy_start))
362 		      copy_num = 0;
363 		    else
364 		      copy_num = hard_regno_nregs (copy_start,
365 						   GET_MODE (copy_reg));
366 
		    /* If the return register is not likely spilled - as is
		       the case for floating point on SH4 - then it might
		       be set by an arithmetic operation that needs a
		       different mode than the exit block.  */
371 		    for (j = n_entities - 1; j >= 0; j--)
372 		      {
373 			int e = entity_map[j];
374 			int mode =
375 			  targetm.mode_switching.needed (e, return_copy);
376 
377 			if (mode != num_modes[e]
378 			    && mode != targetm.mode_switching.exit (e))
379 			  break;
380 		      }
381 		    if (j >= 0)
382 		      {
383 			/* __builtin_return emits a sequence of loads to all
384 			   return registers.  One of them might require
385 			   another mode than MODE_EXIT, even if it is
386 			   unrelated to the return value, so we want to put
387 			   the final mode switch after it.  */
388 			if (multi_reg_return
389 			    && targetm.calls.function_value_regno_p
390 			        (copy_start))
391 			  forced_late_switch = true;
392 
393 			/* For the SH4, floating point loads depend on fpscr,
394 			   thus we might need to put the final mode switch
395 			   after the return value copy.  That is still OK,
396 			   because a floating point return value does not
397 			   conflict with address reloads.  */
398 			if (copy_start >= ret_start
399 			    && copy_start + copy_num <= ret_end
400 			    && OBJECT_P (SET_SRC (return_copy_pat)))
401 			  forced_late_switch = true;
402 			break;
403 		      }
404 		    if (copy_num == 0)
405 		      {
406 			last_insn = return_copy;
407 			continue;
408 		      }
409 
410 		    if (copy_start >= ret_start
411 			&& copy_start + copy_num <= ret_end)
412 		      nregs -= copy_num;
413 		    else if (!multi_reg_return
414 			     || !targetm.calls.function_value_regno_p
415 				 (copy_start))
416 		      break;
417 		    last_insn = return_copy;
418 		  }
		/* ??? Exception handling can lead to the return value
		   copy being already separated from the return value use,
		   as in unwind-dw2.c.
		   Similarly, conditionally returning without a value,
		   and conditionally using builtin_return can lead to an
		   isolated use.  */
425 		if (return_copy == BB_HEAD (src_bb))
426 		  {
427 		    short_block = true;
428 		    break;
429 		  }
430 		last_insn = return_copy;
431 	      }
432 	    while (nregs);
433 
	    /* If we didn't see a full return value copy, verify that there
	       is a plausible reason for this.  If some, but not all, of the
	       return register is likely spilled, we can expect that there
	       is a copy for the likely spilled part.  */
438 	    gcc_assert (!nregs
439 			|| forced_late_switch
440 			|| short_block
441 			|| !(targetm.class_likely_spilled_p
442 			     (REGNO_REG_CLASS (ret_start)))
443 			|| nregs != REG_NREGS (ret_reg)
			/* For multi-hard-register floating point
			   values, sometimes the likely-spilled part
			   is ordinarily copied first, then the other
			   part is set with an arithmetic operation.
			   This doesn't actually cause reload
			   failures, so let it pass.  */
450 			|| (GET_MODE_CLASS (GET_MODE (ret_reg)) != MODE_INT
451 			    && nregs != 1));
452 
453 	    if (!NOTE_INSN_BASIC_BLOCK_P (last_insn))
454 	      {
455 		before_return_copy
456 		  = emit_note_before (NOTE_INSN_DELETED, last_insn);
457 		/* Instructions preceding LAST_INSN in the same block might
458 		   require a different mode than MODE_EXIT, so if we might
459 		   have such instructions, keep them in a separate block
460 		   from pre_exit.  */
461 		src_bb = split_block (src_bb,
462 				      PREV_INSN (before_return_copy))->dest;
463 	      }
464 	    else
465 	      before_return_copy = last_insn;
466 	    pre_exit = split_block (src_bb, before_return_copy)->src;
467 	  }
468 	else
469 	  {
470 	    pre_exit = split_edge (eg);
471 	  }
472       }
473 
474   return pre_exit;
475 }
476 
477 /* Find all insns that need a particular mode setting, and insert the
478    necessary mode switches.  Return true if we did work.  */
479 
480 static int
481 optimize_mode_switching (void)
482 {
483   int e;
484   basic_block bb;
485   bool need_commit = false;
486   static const int num_modes[] = NUM_MODES_FOR_MODE_SWITCHING;
487 #define N_ENTITIES ARRAY_SIZE (num_modes)
488   int entity_map[N_ENTITIES];
489   struct bb_info *bb_info[N_ENTITIES];
490   int i, j;
491   int n_entities = 0;
492   int max_num_modes = 0;
493   bool emitted ATTRIBUTE_UNUSED = false;
494   basic_block post_entry = 0;
495   basic_block pre_exit = 0;
496   struct edge_list *edge_list = 0;
497 
498   /* These bitmaps are used for the LCM algorithm.  */
499   sbitmap *kill, *del, *insert, *antic, *transp, *comp;
500   sbitmap *avin, *avout;
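  /* A mode bit set in TRANSP means the block neither needs nor changes that
     mode; in ANTIC it means the block's first segment needs the mode; in
     COMP it means the block ends with the mode in effect.  KILL is the
     complement of TRANSP.  pre_edge_lcm_avs computes INSERT (mode sets to
     add on edges) and DEL (per-block mode requirements that become
     redundant), and fills AVIN/AVOUT with mode availability at block
     entry/exit.  */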
501 
502   for (e = N_ENTITIES - 1; e >= 0; e--)
503     if (OPTIMIZE_MODE_SWITCHING (e))
504       {
505 	int entry_exit_extra = 0;
506 
	/* Create the list of segments within each basic block.
	   If NORMAL_MODE is defined, allow for up to three extra
	   blocks split from the entry and exit blocks.  */
510 	if (targetm.mode_switching.entry && targetm.mode_switching.exit)
511 	  entry_exit_extra = 3;
512 
513 	bb_info[n_entities]
514 	  = XCNEWVEC (struct bb_info,
515 		      last_basic_block_for_fn (cfun) + entry_exit_extra);
516 	entity_map[n_entities++] = e;
517 	if (num_modes[e] > max_num_modes)
518 	  max_num_modes = num_modes[e];
519       }
520 
521   if (! n_entities)
522     return 0;
523 
  /* MODE_ENTRY and MODE_EXIT must be defined either both or neither.  */
525   gcc_assert ((targetm.mode_switching.entry && targetm.mode_switching.exit)
526 	      || (!targetm.mode_switching.entry
527 		  && !targetm.mode_switching.exit));
528 
529   if (targetm.mode_switching.entry && targetm.mode_switching.exit)
530     {
      /* Split the edge from the entry block, so that we can note that
	 NORMAL_MODE is supplied there.  */
533       post_entry = split_edge (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
534       pre_exit = create_pre_exit (n_entities, entity_map, num_modes);
535     }
536 
537   df_analyze ();
538 
539   /* Create the bitmap vectors.  */
540   antic = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
541 				n_entities * max_num_modes);
542   transp = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
543 				 n_entities * max_num_modes);
544   comp = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
545 			       n_entities * max_num_modes);
546   avin = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
547 			       n_entities * max_num_modes);
548   avout = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
549 				n_entities * max_num_modes);
550   kill = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
551 			       n_entities * max_num_modes);
552 
553   bitmap_vector_ones (transp, last_basic_block_for_fn (cfun));
554   bitmap_vector_clear (antic, last_basic_block_for_fn (cfun));
555   bitmap_vector_clear (comp, last_basic_block_for_fn (cfun));
556 
557   for (j = n_entities - 1; j >= 0; j--)
558     {
559       int e = entity_map[j];
560       int no_mode = num_modes[e];
561       struct bb_info *info = bb_info[j];
562       rtx_insn *insn;
563 
      /* Determine the mode (if any) that the first mode-requiring insn in
	 each basic block needs for entity E.  This will be the mode that
	 is anticipatable for the block.  Also compute the initial
	 transparency settings.  */
567       FOR_EACH_BB_FN (bb, cfun)
568 	{
569 	  struct seginfo *ptr;
570 	  int last_mode = no_mode;
571 	  bool any_set_required = false;
572 	  HARD_REG_SET live_now;
573 
574 	  info[bb->index].mode_out = info[bb->index].mode_in = no_mode;
575 
576 	  REG_SET_TO_HARD_REG_SET (live_now, df_get_live_in (bb));
577 
578 	  /* Pretend the mode is clobbered across abnormal edges.  */
579 	  {
580 	    edge_iterator ei;
581 	    edge eg;
582 	    FOR_EACH_EDGE (eg, ei, bb->preds)
583 	      if (eg->flags & EDGE_COMPLEX)
584 		break;
585 	    if (eg)
586 	      {
587 		rtx_insn *ins_pos = BB_HEAD (bb);
588 		if (LABEL_P (ins_pos))
589 		  ins_pos = NEXT_INSN (ins_pos);
590 		gcc_assert (NOTE_INSN_BASIC_BLOCK_P (ins_pos));
591 		if (ins_pos != BB_END (bb))
592 		  ins_pos = NEXT_INSN (ins_pos);
593 		ptr = new_seginfo (no_mode, ins_pos, bb->index, live_now);
594 		add_seginfo (info + bb->index, ptr);
595 		for (i = 0; i < no_mode; i++)
596 		  clear_mode_bit (transp[bb->index], j, i);
597 	      }
598 	  }
599 
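	  /* Scan the insns in the block, creating a new segment whenever the
	     required mode changes, and keep LIVE_NOW up to date so that each
	     segment records the hard registers live at its insertion
	     point.  */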
600 	  FOR_BB_INSNS (bb, insn)
601 	    {
602 	      if (INSN_P (insn))
603 		{
604 		  int mode = targetm.mode_switching.needed (e, insn);
605 		  rtx link;
606 
607 		  if (mode != no_mode && mode != last_mode)
608 		    {
609 		      any_set_required = true;
610 		      last_mode = mode;
611 		      ptr = new_seginfo (mode, insn, bb->index, live_now);
612 		      add_seginfo (info + bb->index, ptr);
613 		      for (i = 0; i < no_mode; i++)
614 			clear_mode_bit (transp[bb->index], j, i);
615 		    }
616 
617 		  if (targetm.mode_switching.after)
618 		    last_mode = targetm.mode_switching.after (e, last_mode,
619 							      insn);
620 
621 		  /* Update LIVE_NOW.  */
622 		  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
623 		    if (REG_NOTE_KIND (link) == REG_DEAD)
624 		      reg_dies (XEXP (link, 0), &live_now);
625 
626 		  note_stores (PATTERN (insn), reg_becomes_live, &live_now);
627 		  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
628 		    if (REG_NOTE_KIND (link) == REG_UNUSED)
629 		      reg_dies (XEXP (link, 0), &live_now);
630 		}
631 	    }
632 
633 	  info[bb->index].computing = last_mode;
634 	  /* Check for blocks without ANY mode requirements.
635 	     N.B. because of MODE_AFTER, last_mode might still
636 	     be different from no_mode, in which case we need to
637 	     mark the block as nontransparent.  */
638 	  if (!any_set_required)
639 	    {
640 	      ptr = new_seginfo (no_mode, BB_END (bb), bb->index, live_now);
641 	      add_seginfo (info + bb->index, ptr);
642 	      if (last_mode != no_mode)
643 		for (i = 0; i < no_mode; i++)
644 		  clear_mode_bit (transp[bb->index], j, i);
645 	    }
646 	}
647       if (targetm.mode_switching.entry && targetm.mode_switching.exit)
648 	{
649 	  int mode = targetm.mode_switching.entry (e);
650 
651 	  info[post_entry->index].mode_out =
652 	    info[post_entry->index].mode_in = no_mode;
653 	  if (pre_exit)
654 	    {
655 	      info[pre_exit->index].mode_out =
656 		info[pre_exit->index].mode_in = no_mode;
657 	    }
658 
659 	  if (mode != no_mode)
660 	    {
661 	      bb = post_entry;
662 
663 	      /* By always making this nontransparent, we save
664 		 an extra check in make_preds_opaque.  We also
665 		 need this to avoid confusing pre_edge_lcm when
666 		 antic is cleared but transp and comp are set.  */
667 	      for (i = 0; i < no_mode; i++)
668 		clear_mode_bit (transp[bb->index], j, i);
669 
670 	      /* Insert a fake computing definition of MODE into entry
671 		 blocks which compute no mode. This represents the mode on
672 		 entry.  */
673 	      info[bb->index].computing = mode;
674 
675 	      if (pre_exit)
676 		info[pre_exit->index].seginfo->mode =
677 		  targetm.mode_switching.exit (e);
678 	    }
679 	}
680 
681       /* Set the anticipatable and computing arrays.  */
682       for (i = 0; i < no_mode; i++)
683 	{
684 	  int m = targetm.mode_switching.priority (entity_map[j], i);
685 
686 	  FOR_EACH_BB_FN (bb, cfun)
687 	    {
688 	      if (info[bb->index].seginfo->mode == m)
689 		set_mode_bit (antic[bb->index], j, m);
690 
691 	      if (info[bb->index].computing == m)
692 		set_mode_bit (comp[bb->index], j, m);
693 	    }
694 	}
695     }
696 
  /* Calculate the optimal locations for placing the mode switches.  */
699 
700   FOR_EACH_BB_FN (bb, cfun)
701     bitmap_not (kill[bb->index], transp[bb->index]);
702 
703   edge_list = pre_edge_lcm_avs (n_entities * max_num_modes, transp, comp, antic,
704 				kill, avin, avout, &insert, &del);
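
  /* The loop below records on each edge's AUX field the mode (if any) that
     LCM wants to set there, uses AVIN/AVOUT to seed mode_in/mode_out for
     each block, drops the per-block requirements that DEL marks as
     redundant, and then emits the remaining mode sets.  */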
705 
706   for (j = n_entities - 1; j >= 0; j--)
707     {
708       int no_mode = num_modes[entity_map[j]];
709 
      /* Record on each edge the mode (if any) that LCM decided should be
	 set there for this entity; the sets themselves are emitted by
	 commit_mode_sets below.  */
711 
712       for (int ed = NUM_EDGES (edge_list) - 1; ed >= 0; ed--)
713 	{
714 	  edge eg = INDEX_EDGE (edge_list, ed);
715 
716 	  eg->aux = (void *)(intptr_t)-1;
717 
718 	  for (i = 0; i < no_mode; i++)
719 	    {
720 	      int m = targetm.mode_switching.priority (entity_map[j], i);
721 	      if (mode_bit_p (insert[ed], j, m))
722 		{
723 		  eg->aux = (void *)(intptr_t)m;
724 		  break;
725 		}
726 	    }
727 	}
728 
729       FOR_EACH_BB_FN (bb, cfun)
730 	{
731 	  struct bb_info *info = bb_info[j];
732 	  int last_mode = no_mode;
733 
	  /* Initialize mode-out availability for bb: if exactly one mode is
	     available on exit, record it, otherwise use no_mode.  */
735 	  for (i = 0; i < no_mode; i++)
736 	    if (mode_bit_p (avout[bb->index], j, i))
737 	      {
738 		if (last_mode == no_mode)
739 		  last_mode = i;
740 		if (last_mode != i)
741 		  {
742 		    last_mode = no_mode;
743 		    break;
744 		  }
745 	      }
746 	  info[bb->index].mode_out = last_mode;
747 
	  /* Initialize mode-in availability for bb.  */
749 	  last_mode = no_mode;
750 	  for (i = 0; i < no_mode; i++)
751 	    if (mode_bit_p (avin[bb->index], j, i))
752 	      {
753 		if (last_mode == no_mode)
754 		  last_mode = i;
755 		if (last_mode != i)
756 		  {
757 		    last_mode = no_mode;
758 		    break;
759 		  }
760 	      }
761 	  info[bb->index].mode_in = last_mode;
762 
763 	  for (i = 0; i < no_mode; i++)
764 	    if (mode_bit_p (del[bb->index], j, i))
765 	      info[bb->index].seginfo->mode = no_mode;
766 	}
767 
768       /* Now output the remaining mode sets in all the segments.  */
769 
      /* In case no mode was inserted, the mode information on the edges
	 might not be complete.
	 Update mode info on edges and commit pending mode sets.  */
773       need_commit |= commit_mode_sets (edge_list, entity_map[j], bb_info[j]);
774 
775       /* Reset modes for next entity.  */
776       clear_aux_for_edges ();
777 
778       FOR_EACH_BB_FN (bb, cfun)
779 	{
780 	  struct seginfo *ptr, *next;
781 	  int cur_mode = bb_info[j][bb->index].mode_in;
782 
783 	  for (ptr = bb_info[j][bb->index].seginfo; ptr; ptr = next)
784 	    {
785 	      next = ptr->next;
786 	      if (ptr->mode != no_mode)
787 		{
788 		  rtx_insn *mode_set;
789 
790 		  rtl_profile_for_bb (bb);
791 		  start_sequence ();
792 
793 		  targetm.mode_switching.emit (entity_map[j], ptr->mode,
794 					       cur_mode, ptr->regs_live);
795 		  mode_set = get_insns ();
796 		  end_sequence ();
797 
		  /* Within a basic block, each mode set supersedes the
		     previous one.  */
799 		  cur_mode = ptr->mode;
800 
801 		  /* Insert MODE_SET only if it is nonempty.  */
802 		  if (mode_set != NULL_RTX)
803 		    {
804 		      emitted = true;
805 		      if (NOTE_INSN_BASIC_BLOCK_P (ptr->insn_ptr))
806 			/* We need to emit the insns in a FIFO-like manner,
807 			   i.e. the first to be emitted at our insertion
			   point ends up first in the instruction stream.
809 			   Because we made sure that NOTE_INSN_BASIC_BLOCK is
810 			   only used for initially empty basic blocks, we
811 			   can achieve this by appending at the end of
812 			   the block.  */
813 			emit_insn_after
814 			  (mode_set, BB_END (NOTE_BASIC_BLOCK (ptr->insn_ptr)));
815 		      else
816 			emit_insn_before (mode_set, ptr->insn_ptr);
817 		    }
818 
819 		  default_rtl_profile ();
820 		}
821 
822 	      free (ptr);
823 	    }
824 	}
825 
826       free (bb_info[j]);
827     }
828 
829   free_edge_list (edge_list);
830 
831   /* Finished. Free up all the things we've allocated.  */
832   sbitmap_vector_free (del);
833   sbitmap_vector_free (insert);
834   sbitmap_vector_free (kill);
835   sbitmap_vector_free (antic);
836   sbitmap_vector_free (transp);
837   sbitmap_vector_free (comp);
838   sbitmap_vector_free (avin);
839   sbitmap_vector_free (avout);
840 
841   if (need_commit)
842     commit_edge_insertions ();
843 
844   if (targetm.mode_switching.entry && targetm.mode_switching.exit)
845     cleanup_cfg (CLEANUP_NO_INSN_DEL);
846   else if (!need_commit && !emitted)
847     return 0;
848 
849   return 1;
850 }
851 
852 #endif /* OPTIMIZE_MODE_SWITCHING */
853 
854 namespace {
855 
856 const pass_data pass_data_mode_switching =
857 {
858   RTL_PASS, /* type */
859   "mode_sw", /* name */
860   OPTGROUP_NONE, /* optinfo_flags */
861   TV_MODE_SWITCH, /* tv_id */
862   0, /* properties_required */
863   0, /* properties_provided */
864   0, /* properties_destroyed */
865   0, /* todo_flags_start */
866   TODO_df_finish, /* todo_flags_finish */
867 };
868 
869 class pass_mode_switching : public rtl_opt_pass
870 {
871 public:
872   pass_mode_switching (gcc::context *ctxt)
873     : rtl_opt_pass (pass_data_mode_switching, ctxt)
874   {}
875 
876   /* opt_pass methods: */
877   /* The epiphany backend creates a second instance of this pass, so we need
878      a clone method.  */
879   opt_pass * clone () { return new pass_mode_switching (m_ctxt); }
880   virtual bool gate (function *)
881     {
882 #ifdef OPTIMIZE_MODE_SWITCHING
883       return true;
884 #else
885       return false;
886 #endif
887     }
888 
889   virtual unsigned int execute (function *)
890     {
891 #ifdef OPTIMIZE_MODE_SWITCHING
892       optimize_mode_switching ();
893 #endif /* OPTIMIZE_MODE_SWITCHING */
894       return 0;
895     }
896 
897 }; // class pass_mode_switching
898 
899 } // anon namespace
900 
901 rtl_opt_pass *
902 make_pass_mode_switching (gcc::context *ctxt)
903 {
904   return new pass_mode_switching (ctxt);
905 }
906