/*
 *  Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

DEF_MACRO(fDUMPQ,
	do {
		printf(STR ":" #REG ": 0x%016llx\n", REG.ud[0]);
	} while (0),
	()
)

DEF_MACRO(fUSE_LOOKUP_ADDRESS_BY_REV,
	PROC->arch_proc_options->mmvec_use_full_va_for_lookup,
	()
)

DEF_MACRO(fUSE_LOOKUP_ADDRESS,
	1,
	()
)

DEF_MACRO(fNOTQ,
	({mmqreg_t _ret = {0}; int _i_; for (_i_ = 0; _i_ < fVECSIZE()/64; _i_++) _ret.ud[_i_] = ~VAL.ud[_i_]; _ret;}),
	()
)

DEF_MACRO(fGETQBITS,
	((MASK) & (REG.w[(BITNO)>>5] >> ((BITNO) & 0x1f))),
	()
)

DEF_MACRO(fGETQBIT,
	fGETQBITS(REG,1,1,BITNO),
	()
)
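
/*
 * fGETQBITS extracts the predicate bits selected by MASK starting at bit
 * BITNO from the packed 32-bit words of a qreg; the second argument (a
 * width) is not used by the expansion.  fGETQBIT is the single-bit case,
 * e.g. fGETQBIT(Q, 9) reads bit 9 of Q.w[0].
 */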

DEF_MACRO(fGENMASKW,
	(((fGETQBIT(QREG,(IDX*4+0)) ? 0xFF : 0x0) << 0)
	|((fGETQBIT(QREG,(IDX*4+1)) ? 0xFF : 0x0) << 8)
	|((fGETQBIT(QREG,(IDX*4+2)) ? 0xFF : 0x0) << 16)
	|((fGETQBIT(QREG,(IDX*4+3)) ? 0xFF : 0x0) << 24)),
	()
)
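
/*
 * Illustrative expansion: fGENMASKW turns qbits 4*IDX..4*IDX+3 into a
 * 32-bit byte-lane mask, so qbits (1,0,1,1) yield 0xFFFF00FF.
 */
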
DEF_MACRO(fGET10BIT,
	{
		COE = (((((fGETUBYTE(3,VAL) >> (2 * POS)) & 3) << 8) | fGETUBYTE(POS,VAL)) << 6);
		COE >>= 6;
	},
	()
)

DEF_MACRO(fVMAX,
	(X>Y) ? X : Y,
	()
)

DEF_MACRO(fGETNIBBLE,
    ( fSXTN(4,8,(SRC >> (4*IDX)) & 0xF) ),
    ()
)

DEF_MACRO(fGETCRUMB,
    ( fSXTN(2,8,(SRC >> (2*IDX)) & 0x3) ),
    ()
)

DEF_MACRO(fGETCRUMB_SYMMETRIC,
    ( (fGETCRUMB(IDX,SRC)>=0 ? (2-fGETCRUMB(IDX,SRC)) : fGETCRUMB(IDX,SRC) ) ),
    ()
)
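
/*
 * fGETNIBBLE/fGETCRUMB sign-extend the IDX'th 4-bit/2-bit field of SRC to
 * a byte, so crumbs take the values {0, 1, -2, -1}.  fGETCRUMB_SYMMETRIC
 * remaps the non-negative ones, giving the magnitude-symmetric set
 * {2, 1, -2, -1}.
 */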

#define ZERO_OFFSET_2B +

DEF_MACRO(fGENMASKH,
	(((fGETQBIT(QREG,(IDX*2+0)) ? 0xFF : 0x0) << 0)
	|((fGETQBIT(QREG,(IDX*2+1)) ? 0xFF : 0x0) << 8)),
	()
)

DEF_MACRO(fGETMASKW,
	(VREG.w[IDX] & fGENMASKW((QREG),IDX)),
	()
)

DEF_MACRO(fGETMASKH,
	(VREG.h[IDX] & fGENMASKH((QREG),IDX)),
	()
)

DEF_MACRO(fCONDMASK8,
	(fGETQBIT(QREG,IDX) ? (YESVAL) : (NOVAL)),
	()
)

DEF_MACRO(fCONDMASK16,
	((fGENMASKH(QREG,IDX) & (YESVAL)) | (fGENMASKH(fNOTQ(QREG),IDX) & (NOVAL))),
	()
)

DEF_MACRO(fCONDMASK32,
	((fGENMASKW(QREG,IDX) & (YESVAL)) | (fGENMASKW(fNOTQ(QREG),IDX) & (NOVAL))),
	()
)
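
/*
 * The fCONDMASK* macros implement per-lane select: the 8-bit form is a
 * plain conditional, while the 16- and 32-bit forms merge YESVAL and
 * NOVAL through complementary byte masks built from QREG and fNOTQ(QREG).
 */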

DEF_MACRO(fSETQBITS,
	do {
		size4u_t __TMP = (VAL);
		REG.w[(BITNO)>>5] &= ~((MASK) << ((BITNO) & 0x1f));
		REG.w[(BITNO)>>5] |= (((__TMP) & (MASK)) << ((BITNO) & 0x1f));
	} while (0),
	()
)

DEF_MACRO(fSETQBIT,
	fSETQBITS(REG,1,1,BITNO,VAL),
	()
)

DEF_MACRO(fVBYTES,
	(fVECSIZE()),
	()
)

DEF_MACRO(fVHALVES,
	(fVECSIZE()/2),
	()
)

DEF_MACRO(fVWORDS,
	(fVECSIZE()/4),
	()
)

DEF_MACRO(fVDWORDS,
	(fVECSIZE()/8),
	()
)

DEF_MACRO(fVALIGN,
    ( ADDR = ADDR & ~(LOG2_ALIGNMENT-1)),
    ()
)

DEF_MACRO(fVLASTBYTE,
    ( ADDR = ADDR | (LOG2_ALIGNMENT-1)),
    ()
)

DEF_MACRO(fVELEM,
    ((fVECSIZE()*8)/WIDTH),
    ()
)

DEF_MACRO(fVECLOGSIZE,
    (mmvec_current_veclogsize(thread)),
    ()
)

DEF_MACRO(fVECSIZE,
    (1<<fVECLOGSIZE()),
    ()
)
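
/*
 * Vector geometry: fVECLOGSIZE is log2 of the vector length in bytes and
 * fVECSIZE is that length itself.  E.g. with 128-byte vectors, fVBYTES is
 * 128, fVHALVES 64, fVWORDS 32, fVDWORDS 16, and fVELEM(16) gives the 64
 * halfword lanes.
 */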

DEF_MACRO(fSWAPB,
	{
		size1u_t tmp = A;
		A = B;
		B = tmp;
	},
	/* NOTHING */
)

DEF_MACRO(
	fVZERO,
	mmvec_zero_vector(),
	()
)

DEF_MACRO(
	fNEWVREG,
	((THREAD2STRUCT->VRegs_updated & (((VRegMask)1)<<VNUM)) ? THREAD2STRUCT->future_VRegs[VNUM] : mmvec_zero_vector()),
	(A_DOTNEWVALUE,A_RESTRICT_SLOT0ONLY)
)
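
/*
 * fNEWVREG provides .new operand forwarding: if VNUM was written earlier
 * in the same packet (tracked in VRegs_updated), the not-yet-committed
 * future value is read; otherwise the macro yields a zero vector.
 */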

DEF_MACRO(
	fV_AL_CHECK,
	if ((EA) & (MASK)) {
		warn("aligning misaligned vector. PC=%08x EA=%08x",thread->Regs[REG_PC],(EA));
	},
	()
)

DEF_MACRO(fSCATTER_INIT,
    {
	mem_vector_scatter_init(thread, insn, REGION_START, LENGTH, ELEMENT_SIZE);
	if (EXCEPTION_DETECTED) return;
    },
    (A_STORE,A_MEMLIKE,A_RESTRICT_SLOT0ONLY)
)

DEF_MACRO(fGATHER_INIT,
    {
	mem_vector_gather_init(thread, insn, REGION_START, LENGTH, ELEMENT_SIZE);
	if (EXCEPTION_DETECTED) return;
    },
    (A_LOAD,A_MEMLIKE,A_RESTRICT_SLOT1ONLY)
)

DEF_MACRO(fSCATTER_FINISH,
    {
	if (EXCEPTION_DETECTED) return;
	mem_vector_scatter_finish(thread, insn, OP);
    },
    ()
)

DEF_MACRO(fGATHER_FINISH,
    {
	if (EXCEPTION_DETECTED) return;
	mem_vector_gather_finish(thread, insn);
    },
    ()
)
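
/*
 * Scatter/gather protocol: the *_INIT macros validate the VTCM region and
 * translation and may raise an exception (hence the early return), the
 * element macros below only log bytes, and the *_FINISH macros commit the
 * logged accesses.
 */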

DEF_MACRO(CHECK_VTCM_PAGE,
    {
	int slot = insn->slot;
	paddr_t pa = thread->mem_access[slot].paddr+OFFSET;
	pa = pa & ~(ALIGNMENT-1);
	FLAG = (pa < (thread->mem_access[slot].paddr+LENGTH));
    },
    ()
)

DEF_MACRO(COUNT_OUT_OF_BOUNDS,
    {
	if (!FLAG)
	{
		THREAD2STRUCT->vtcm_log.oob_access += SIZE;
		warn("Scatter/Gather out of bounds of region");
	}
    },
    ()
)

DEF_MACRO(fLOG_SCATTER_OP,
    {
	/* Log the operation size; the extension code (ext.c) performs the
	   increment right before the memory write. */
	THREAD2STRUCT->vtcm_log.op = 1;
	THREAD2STRUCT->vtcm_log.op_size = SIZE;
    },
    ()
)
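
/*
 * Presumably vtcm_log.op marks an accumulating (read-modify-write)
 * scatter; SCATTER_OP_WRITE_TO_MEM below applies the logged increments
 * when the operation commits.
 */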

DEF_MACRO(fVLOG_VTCM_WORD_INCREMENT,
    {
	int slot = insn->slot;
	int log_bank = 0;
	int log_byte = 0;
	paddr_t pa = thread->mem_access[slot].paddr+(OFFSET & ~(ALIGNMENT-1));
	paddr_t pa_high = thread->mem_access[slot].paddr+LEN;
	for(int i0 = 0; i0 < 4; i0++)
	{
		log_byte = ((OFFSET>=0)&&((pa+i0)<=pa_high));
		log_bank |= (log_byte<<i0);
		LOG_VTCM_BYTE(pa+i0,log_byte,INC.ub[4*IDX+i0],4*IDX+i0);
	}
	LOG_VTCM_BANK(pa, log_bank, IDX);
    },
    ()
)

DEF_MACRO(fVLOG_VTCM_HALFWORD_INCREMENT,
    {
	int slot = insn->slot;
	int log_bank = 0;
	int log_byte = 0;
	paddr_t pa = thread->mem_access[slot].paddr+(OFFSET & ~(ALIGNMENT-1));
	paddr_t pa_high = thread->mem_access[slot].paddr+LEN;
	for(int i0 = 0; i0 < 2; i0++) {
		log_byte = ((OFFSET>=0)&&((pa+i0)<=pa_high));
		log_bank |= (log_byte<<i0);
		LOG_VTCM_BYTE(pa+i0,log_byte,INC.ub[2*IDX+i0],2*IDX+i0);
	}
	LOG_VTCM_BANK(pa, log_bank, IDX);
    },
    ()
)

DEF_MACRO(fVLOG_VTCM_HALFWORD_INCREMENT_DV,
    {
	int slot = insn->slot;
	int log_bank = 0;
	int log_byte = 0;
	paddr_t pa = thread->mem_access[slot].paddr+(OFFSET & ~(ALIGNMENT-1));
	paddr_t pa_high = thread->mem_access[slot].paddr+LEN;
	for(int i0 = 0; i0 < 2; i0++) {
		log_byte = ((OFFSET>=0)&&((pa+i0)<=pa_high));
		log_bank |= (log_byte<<i0);
		LOG_VTCM_BYTE(pa+i0,log_byte,INC.ub[2*IDX+i0],2*IDX+i0);
	}
	LOG_VTCM_BANK(pa, log_bank, (2*IDX2+IDX_H));
    },
    ()
)

DEF_MACRO(GATHER_FUNCTION,
{
	int slot = insn->slot;
	int i0;
	paddr_t pa = thread->mem_access[slot].paddr+OFFSET;
	paddr_t pa_high = thread->mem_access[slot].paddr+LEN;
	int log_bank = 0;
	int log_byte = 0;
	for(i0 = 0; i0 < ELEMENT_SIZE; i0++)
	{
		log_byte = ((OFFSET>=0)&&((pa+i0)<=pa_high)) && QVAL;
		log_bank |= (log_byte<<i0);
		size1u_t B = sim_mem_read1(thread->system_ptr, thread->threadId, thread->mem_access[slot].paddr+OFFSET+i0);
		THREAD2STRUCT->tmp_VRegs[0].ub[ELEMENT_SIZE*IDX+i0] = B;
		LOG_VTCM_BYTE(pa+i0,log_byte,B,ELEMENT_SIZE*IDX+i0);
	}
	LOG_VTCM_BANK(pa, log_bank,BANK_IDX);
},
()
)
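
/*
 * Note: QVAL is substituted textually, so the predicated wrappers below
 * pass expressions such as fGETQBIT(QsV,2*IDX+i0) that reference the
 * loop variable i0 declared inside this macro body.  Gathered bytes land
 * in tmp_VRegs[0] and are also logged for the commit phase.
 */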

DEF_MACRO(fVLOG_VTCM_GATHER_WORD,
    {
	GATHER_FUNCTION(EA,OFFSET,IDX, LEN, 4, IDX, 1);
    },
    ()
)
DEF_MACRO(fVLOG_VTCM_GATHER_HALFWORD,
    {
	GATHER_FUNCTION(EA,OFFSET,IDX, LEN, 2, IDX, 1);
    },
    ()
)
DEF_MACRO(fVLOG_VTCM_GATHER_HALFWORD_DV,
    {
	GATHER_FUNCTION(EA,OFFSET,IDX, LEN, 2, (2*IDX2+IDX_H), 1);
    },
    ()
)
DEF_MACRO(fVLOG_VTCM_GATHER_WORDQ,
    {
	GATHER_FUNCTION(EA,OFFSET,IDX, LEN, 4, IDX, fGETQBIT(QsV,4*IDX+i0));
    },
    ()
)
DEF_MACRO(fVLOG_VTCM_GATHER_HALFWORDQ,
    {
	GATHER_FUNCTION(EA,OFFSET,IDX, LEN, 2, IDX, fGETQBIT(QsV,2*IDX+i0));
    },
    ()
)

DEF_MACRO(fVLOG_VTCM_GATHER_HALFWORDQ_DV,
    {
	GATHER_FUNCTION(EA,OFFSET,IDX, LEN, 2, (2*IDX2+IDX_H), fGETQBIT(QsV,2*IDX+i0));
    },
    ()
)

DEF_MACRO(DEBUG_LOG_ADDR,
    {
	if (thread->processor_ptr->arch_proc_options->mmvec_network_addr_log2)
	{
		int slot = insn->slot;
		paddr_t pa = thread->mem_access[slot].paddr+OFFSET;
	}
    },
    ()
)

DEF_MACRO(SCATTER_OP_WRITE_TO_MEM,
    {
	for (int i = 0; i < mmvecx->vtcm_log.size; i+=sizeof(TYPE))
	{
		if (mmvecx->vtcm_log.mask.ub[i] != 0) {
			TYPE dst = 0;
			TYPE inc = 0;
			for(int j = 0; j < sizeof(TYPE); j++) {
				dst |= (sim_mem_read1(thread->system_ptr, thread->threadId, mmvecx->vtcm_log.pa[i+j]) << (8*j));
				inc |= mmvecx->vtcm_log.data.ub[j+i] << (8*j);
				mmvecx->vtcm_log.mask.ub[j+i] = 0;
				mmvecx->vtcm_log.data.ub[j+i] = 0;
				mmvecx->vtcm_log.offsets.ub[j+i] = 0;
			}
			dst += inc;
			for(int j = 0; j < sizeof(TYPE); j++) {
				sim_mem_write1(thread->system_ptr,thread->threadId, mmvecx->vtcm_log.pa[i+j], (dst >> (8*j)) & 0xFF);
			}
		}
	}
    },
    ()
)
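
/*
 * Commit step for scatter-accumulate: each masked element is reassembled
 * little-endian from memory (dst) and from the log (inc), summed, and
 * written back byte by byte; the consumed log entries are cleared.  TYPE
 * is the element type, e.g. a 2- or 4-byte unsigned integer.
 */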

DEF_MACRO(SCATTER_FUNCTION,
{
	int slot = insn->slot;
	int i0;
	paddr_t pa = thread->mem_access[slot].paddr+OFFSET;
	paddr_t pa_high = thread->mem_access[slot].paddr+LEN;
	int log_bank = 0;
	int log_byte = 0;
	for(i0 = 0; i0 < ELEMENT_SIZE; i0++) {
		log_byte = ((OFFSET>=0)&&((pa+i0)<=pa_high)) && QVAL;
		log_bank |= (log_byte<<i0);
		LOG_VTCM_BYTE(pa+i0,log_byte,IN.ub[ELEMENT_SIZE*IDX+i0],ELEMENT_SIZE*IDX+i0);
	}
	LOG_VTCM_BANK(pa, log_bank,BANK_IDX);
},
()
)
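
/*
 * As with GATHER_FUNCTION, QVAL expands textually and may reference the
 * internal loop variable i0; stores are only logged here and reach memory
 * at commit.
 */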

DEF_MACRO(fVLOG_VTCM_HALFWORD,
    {
	SCATTER_FUNCTION (EA,OFFSET,IDX, LEN, 2, IDX, 1, IN);
    },
    ()
)
DEF_MACRO(fVLOG_VTCM_WORD,
    {
	SCATTER_FUNCTION (EA,OFFSET,IDX, LEN, 4, IDX, 1, IN);
    },
    ()
)

DEF_MACRO(fVLOG_VTCM_HALFWORDQ,
    {
	SCATTER_FUNCTION (EA,OFFSET,IDX, LEN, 2, IDX, fGETQBIT(QsV,2*IDX+i0), IN);
    },
    ()
)
DEF_MACRO(fVLOG_VTCM_WORDQ,
    {
	SCATTER_FUNCTION (EA,OFFSET,IDX, LEN, 4, IDX, fGETQBIT(QsV,4*IDX+i0), IN);
    },
    ()
)

DEF_MACRO(fVLOG_VTCM_HALFWORD_DV,
    {
	SCATTER_FUNCTION (EA,OFFSET,IDX, LEN, 2, (2*IDX2+IDX_H), 1, IN);
    },
    ()
)

DEF_MACRO(fVLOG_VTCM_HALFWORDQ_DV,
    {
	SCATTER_FUNCTION (EA,OFFSET,IDX, LEN, 2, (2*IDX2+IDX_H), fGETQBIT(QsV,2*IDX+i0), IN);
    },
    ()
)

DEF_MACRO(fSTORERELEASE,
    {
	fV_AL_CHECK(EA,fVECSIZE()-1);
	mem_store_release(thread, insn, fVECSIZE(), EA&~(fVECSIZE()-1), EA, TYPE, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr));
    },
    (A_STORE,A_MEMLIKE)
)

DEF_MACRO(fVFETCH_AL,
    {
	fV_AL_CHECK(EA,fVECSIZE()-1);
	mem_fetch_vector(thread, insn, EA&~(fVECSIZE()-1), insn->slot, fVECSIZE());
    },
    (A_LOAD,A_MEMLIKE)
)

DEF_MACRO(fLOADMMV_AL,
    {
	fV_AL_CHECK(EA,ALIGNMENT-1);
	thread->last_pkt->double_access_vec = 0;
	mem_load_vector_oddva(thread, insn, EA&~(ALIGNMENT-1), EA, insn->slot, LEN, &DST.ub[0], LEN, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr));
    },
    (A_LOAD,A_MEMLIKE)
)

DEF_MACRO(fLOADMMV,
	fLOADMMV_AL(EA,fVECSIZE(),fVECSIZE(),DST),
	()
)

DEF_MACRO(fLOADMMVQ,
	do {
		int __i;
		fLOADMMV_AL(EA,fVECSIZE(),fVECSIZE(),DST);
		fVFOREACH(8,__i) if (!fGETQBIT(QVAL,__i)) DST.b[__i] = 0;
	} while (0),
	()
)

DEF_MACRO(fLOADMMVNQ,
	do {
		int __i;
		fLOADMMV_AL(EA,fVECSIZE(),fVECSIZE(),DST);
		fVFOREACH(8,__i) if (fGETQBIT(QVAL,__i)) DST.b[__i] = 0;
	} while (0),
	()
)

DEF_MACRO(fLOADMMVU_AL,
    {
	size4u_t size2 = (EA)&(ALIGNMENT-1);
	size4u_t size1 = LEN-size2;
	thread->last_pkt->double_access_vec = 1;
	mem_load_vector_oddva(thread, insn, EA+size1, EA+fVECSIZE(), /* slot */ 1, size2, &DST.ub[size1], size2, fUSE_LOOKUP_ADDRESS());
	mem_load_vector_oddva(thread, insn, EA, EA, /* slot */ 0, size1, &DST.ub[0], size1, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr));
    },
    (A_LOAD,A_MEMLIKE)
)

DEF_MACRO(fLOADMMVU,
	{
		/* if the address happens to be aligned, do only the aligned load */
		thread->last_pkt->pkt_has_vtcm_access = 0;
		thread->last_pkt->pkt_access_count = 0;
		if ( (EA & (fVECSIZE()-1)) == 0) {
			thread->last_pkt->pkt_has_vmemu_access = 0;
			thread->last_pkt->double_access = 0;
			fLOADMMV_AL(EA,fVECSIZE(),fVECSIZE(),DST);
		} else {
			thread->last_pkt->pkt_has_vmemu_access = 1;
			thread->last_pkt->double_access = 1;
			fLOADMMVU_AL(EA,fVECSIZE(),fVECSIZE(),DST);
		}
	},
	()
)
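
/*
 * Unaligned vector loads split into two accesses.  Illustrative case with
 * 128-byte vectors and EA & 127 == 16: slot 0 loads the low 112 bytes
 * from EA into DST.ub[0..111], and slot 1 loads the remaining 16 bytes
 * from EA+112.
 */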

DEF_MACRO(fSTOREMMV_AL,
    {
	fV_AL_CHECK(EA,ALIGNMENT-1);
	mem_store_vector_oddva(thread, insn, EA&~(ALIGNMENT-1), EA, insn->slot, LEN, &SRC.ub[0], 0, 0, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr));
    },
    (A_STORE,A_MEMLIKE)
)

DEF_MACRO(fSTOREMMV,
	fSTOREMMV_AL(EA,fVECSIZE(),fVECSIZE(),SRC),
	()
)

DEF_MACRO(fSTOREMMVQ_AL,
    do {
	mmvector_t maskvec;
	int i;
	for (i = 0; i < fVECSIZE(); i++) maskvec.ub[i] = fGETQBIT(MASK,i);
	mem_store_vector_oddva(thread, insn, EA&~(ALIGNMENT-1), EA, insn->slot, LEN, &SRC.ub[0], &maskvec.ub[0], 0, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr));
    } while (0),
    (A_STORE,A_MEMLIKE)
)

DEF_MACRO(fSTOREMMVQ,
	fSTOREMMVQ_AL(EA,fVECSIZE(),fVECSIZE(),SRC,MASK),
	()
)

DEF_MACRO(fSTOREMMVNQ_AL,
    {
	mmvector_t maskvec;
	int i;
	for (i = 0; i < fVECSIZE(); i++) maskvec.ub[i] = fGETQBIT(MASK,i);
	fV_AL_CHECK(EA,ALIGNMENT-1);
	mem_store_vector_oddva(thread, insn, EA&~(ALIGNMENT-1), EA, insn->slot, LEN, &SRC.ub[0], &maskvec.ub[0], 1, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr));
    },
    (A_STORE,A_MEMLIKE)
)

DEF_MACRO(fSTOREMMVNQ,
	fSTOREMMVNQ_AL(EA,fVECSIZE(),fVECSIZE(),SRC,MASK),
	()
)

DEF_MACRO(fSTOREMMVU_AL,
    {
	size4u_t size1 = ALIGNMENT-((EA)&(ALIGNMENT-1));
	size4u_t size2;
	if (size1>LEN) size1 = LEN;
	size2 = LEN-size1;
	mem_store_vector_oddva(thread, insn, EA+size1, EA+fVECSIZE(), /* slot */ 1, size2, &SRC.ub[size1], 0, 0, fUSE_LOOKUP_ADDRESS());
	mem_store_vector_oddva(thread, insn, EA, EA, /* slot */ 0, size1, &SRC.ub[0], 0, 0, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr));
    },
    (A_STORE,A_MEMLIKE)
)
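
/*
 * For unaligned stores, size1 is the byte count up to the next alignment
 * boundary (capped at LEN) and size2 is the remainder, e.g. EA & 127 == 52
 * gives size1 = 76 on slot 0 and size2 = 52 on slot 1.
 */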

DEF_MACRO(fSTOREMMVU,
	{
		thread->last_pkt->pkt_has_vtcm_access = 0;
		thread->last_pkt->pkt_access_count = 0;
		if ( (EA & (fVECSIZE()-1)) == 0) {
			thread->last_pkt->double_access = 0;
			fSTOREMMV_AL(EA,fVECSIZE(),fVECSIZE(),SRC);
		} else {
			thread->last_pkt->double_access = 1;
			thread->last_pkt->pkt_has_vmemu_access = 1;
			fSTOREMMVU_AL(EA,fVECSIZE(),fVECSIZE(),SRC);
		}
	},
	()
)

DEF_MACRO(fSTOREMMVQU_AL,
    {
	size4u_t size1 = ALIGNMENT-((EA)&(ALIGNMENT-1));
	size4u_t size2;
	mmvector_t maskvec;
	int i;
	for (i = 0; i < fVECSIZE(); i++) maskvec.ub[i] = fGETQBIT(MASK,i);
	if (size1>LEN) size1 = LEN;
	size2 = LEN-size1;
	mem_store_vector_oddva(thread, insn, EA+size1, EA+fVECSIZE(), /* slot */ 1, size2, &SRC.ub[size1], &maskvec.ub[size1], 0, fUSE_LOOKUP_ADDRESS());
	mem_store_vector_oddva(thread, insn, EA, EA, /* slot */ 0, size1, &SRC.ub[0], &maskvec.ub[0], 0, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr));
    },
    (A_STORE,A_MEMLIKE)
)

DEF_MACRO(fSTOREMMVQU,
	{
		thread->last_pkt->pkt_has_vtcm_access = 0;
		thread->last_pkt->pkt_access_count = 0;
		if ( (EA & (fVECSIZE()-1)) == 0) {
			thread->last_pkt->double_access = 0;
			fSTOREMMVQ_AL(EA,fVECSIZE(),fVECSIZE(),SRC,MASK);
		} else {
			thread->last_pkt->double_access = 1;
			thread->last_pkt->pkt_has_vmemu_access = 1;
			fSTOREMMVQU_AL(EA,fVECSIZE(),fVECSIZE(),SRC,MASK);
		}
	},
	()
)

DEF_MACRO(fSTOREMMVNQU_AL,
    {
	size4u_t size1 = ALIGNMENT-((EA)&(ALIGNMENT-1));
	size4u_t size2;
	mmvector_t maskvec;
	int i;
	for (i = 0; i < fVECSIZE(); i++) maskvec.ub[i] = fGETQBIT(MASK,i);
	if (size1>LEN) size1 = LEN;
	size2 = LEN-size1;
	mem_store_vector_oddva(thread, insn, EA+size1, EA+fVECSIZE(), /* slot */ 1, size2, &SRC.ub[size1], &maskvec.ub[size1], 1, fUSE_LOOKUP_ADDRESS());
	mem_store_vector_oddva(thread, insn, EA, EA, /* slot */ 0, size1, &SRC.ub[0], &maskvec.ub[0], 1, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr));
    },
    (A_STORE,A_MEMLIKE)
)

DEF_MACRO(fSTOREMMVNQU,
	{
		thread->last_pkt->pkt_has_vtcm_access = 0;
		thread->last_pkt->pkt_access_count = 0;
		if ( (EA & (fVECSIZE()-1)) == 0) {
			thread->last_pkt->double_access = 0;
			fSTOREMMVNQ_AL(EA,fVECSIZE(),fVECSIZE(),SRC,MASK);
		} else {
			thread->last_pkt->double_access = 1;
			thread->last_pkt->pkt_has_vmemu_access = 1;
			fSTOREMMVNQU_AL(EA,fVECSIZE(),fVECSIZE(),SRC,MASK);
		}
	},
	()
)

DEF_MACRO(fVFOREACH,
    for (VAR = 0; VAR < fVELEM(WIDTH); VAR++),
    /* NOTHING */
)

DEF_MACRO(fVARRAY_ELEMENT_ACCESS,
    ARRAY.v[(INDEX) / (fVECSIZE()/(sizeof(ARRAY.TYPE[0])))].TYPE[(INDEX) % (fVECSIZE()/(sizeof(ARRAY.TYPE[0])))],
    ()
)
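
/*
 * fVARRAY_ELEMENT_ACCESS picks element INDEX out of a multi-vector value:
 * e.g. with 128-byte vectors and TYPE w (32 words per vector), INDEX 40
 * maps to ARRAY.v[1].w[8].
 */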

DEF_MACRO(fVNEWCANCEL,
	do { THREAD2STRUCT->VRegs_select &= ~(1<<(REGNUM)); } while (0),
	()
)

DEF_MACRO(fTMPVDATA,
	mmvec_vtmp_data(thread),
	(A_CVI)
)

DEF_MACRO(fVSATDW,
	fVSATW( ( ( ((long long)U)<<32 ) | fZXTN(32,64,V) ) ),
	/* attribs */
)

DEF_MACRO(fVASL_SATHI,
	fVSATW(((U)<<1) | ((V)>>31)),
	/* attribs */
)

DEF_MACRO(fVUADDSAT,
	fVSATUN( WIDTH, fZXTN(WIDTH, 2*WIDTH, U) + fZXTN(WIDTH, 2*WIDTH, V)),
	/* attribs */
)

DEF_MACRO(fVSADDSAT,
	fVSATN( WIDTH, fSXTN(WIDTH, 2*WIDTH, U) + fSXTN(WIDTH, 2*WIDTH, V)),
	/* attribs */
)

DEF_MACRO(fVUSUBSAT,
	fVSATUN( WIDTH, fZXTN(WIDTH, 2*WIDTH, U) - fZXTN(WIDTH, 2*WIDTH, V)),
	/* attribs */
)

DEF_MACRO(fVSSUBSAT,
	fVSATN( WIDTH, fSXTN(WIDTH, 2*WIDTH, U) - fSXTN(WIDTH, 2*WIDTH, V)),
	/* attribs */
)

DEF_MACRO(fVAVGU,
	((fZXTN(WIDTH, 2*WIDTH, U) + fZXTN(WIDTH, 2*WIDTH, V))>>1),
	/* attribs */
)

DEF_MACRO(fVAVGURND,
	((fZXTN(WIDTH, 2*WIDTH, U) + fZXTN(WIDTH, 2*WIDTH, V)+1)>>1),
	/* attribs */
)

DEF_MACRO(fVNAVGU,
	((fZXTN(WIDTH, 2*WIDTH, U) - fZXTN(WIDTH, 2*WIDTH, V))>>1),
	/* attribs */
)

DEF_MACRO(fVNAVGURNDSAT,
	fVSATUN(WIDTH,((fZXTN(WIDTH, 2*WIDTH, U) - fZXTN(WIDTH, 2*WIDTH, V)+1)>>1)),
	/* attribs */
)

DEF_MACRO(fVAVGS,
	((fSXTN(WIDTH, 2*WIDTH, U) + fSXTN(WIDTH, 2*WIDTH, V))>>1),
	/* attribs */
)

DEF_MACRO(fVAVGSRND,
	((fSXTN(WIDTH, 2*WIDTH, U) + fSXTN(WIDTH, 2*WIDTH, V)+1)>>1),
	/* attribs */
)

DEF_MACRO(fVNAVGS,
	((fSXTN(WIDTH, 2*WIDTH, U) - fSXTN(WIDTH, 2*WIDTH, V))>>1),
	/* attribs */
)

DEF_MACRO(fVNAVGSRND,
	((fSXTN(WIDTH, 2*WIDTH, U) - fSXTN(WIDTH, 2*WIDTH, V)+1)>>1),
	/* attribs */
)

DEF_MACRO(fVNAVGSRNDSAT,
	fVSATN(WIDTH,((fSXTN(WIDTH, 2*WIDTH, U) - fSXTN(WIDTH, 2*WIDTH, V)+1)>>1)),
	/* attribs */
)
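
/*
 * The averaging family follows one pattern: widen to 2*WIDTH (signed via
 * fSXTN or unsigned via fZXTN), add or subtract, optionally add 1 to
 * round, shift right by one, and optionally saturate back to WIDTH.
 */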

DEF_MACRO(fVNOROUND,
	VAL,
	/* NOTHING */
)
DEF_MACRO(fVNOSAT,
	VAL,
	/* NOTHING */
)

DEF_MACRO(fVROUND,
	((VAL) + (((SHAMT)>0)?(1LL<<((SHAMT)-1)):0)),
	/* NOTHING */
)

DEF_MACRO(fCARRY_FROM_ADD32,
	(((fZXTN(32,64,A)+fZXTN(32,64,B)+C) >> 32) & 1),
	/* NOTHING */
)
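
/*
 * Worked example: A = B = 0xFFFFFFFF with carry-in C = 1 sums to
 * 0x1FFFFFFFF in 64 bits, so fCARRY_FROM_ADD32 yields 1.
 */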

DEF_MACRO(fUARCH_NOTE_PUMP_4X,
	,
	()
)

DEF_MACRO(fUARCH_NOTE_PUMP_2X,
	,
	()
)