Lines matching refs:ins: every source line that references the identifier "ins" in the Midgard instruction-packing code, one line per hit, annotated with the enclosing function ("argument" and "local" mark the lines where ins is declared).

78 mir_pack_mod(midgard_instruction *ins, unsigned i, bool scalar)  in mir_pack_mod()  argument
80 bool integer = midgard_is_integer_op(ins->op); in mir_pack_mod()
81 unsigned base_size = max_bitsize_for_alu(ins); in mir_pack_mod()
82 unsigned sz = nir_alu_type_get_type_size(ins->src_types[i]); in mir_pack_mod()
86 mir_get_imod(ins->src_shift[i], ins->src_types[i], half, scalar) : in mir_pack_mod()
87 ((ins->src_abs[i] << 0) | in mir_pack_mod()
88 ((ins->src_neg[i] << 1))); in mir_pack_mod()
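
The mir_pack_mod() fragment above packs float source modifiers into two bits: abs in bit 0, neg in bit 1 (integer ops take the shift-style modifier from mir_get_imod() instead). A minimal standalone sketch of the float path, with plain booleans standing in for the real midgard_instruction fields:

#include <stdbool.h>

/* Float-source modifier packing as in mir_pack_mod() above: abs in
 * bit 0, neg in bit 1. Standalone sketch, not the real Mesa types. */
static unsigned
pack_float_mod(bool src_abs, bool src_neg)
{
        return ((unsigned)src_abs << 0) | ((unsigned)src_neg << 1);
}

/* e.g. pack_float_mod(true, true) == 3 encodes -abs(x) */
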
123 vector_to_scalar_alu(midgard_vector_alu v, midgard_instruction *ins) in vector_to_scalar_alu() argument
125 bool is_full = nir_alu_type_get_type_size(ins->dest_type) == 32; in vector_to_scalar_alu()
127 bool half_0 = nir_alu_type_get_type_size(ins->src_types[0]) == 16; in vector_to_scalar_alu()
128 bool half_1 = nir_alu_type_get_type_size(ins->src_types[1]) == 16; in vector_to_scalar_alu()
129 unsigned comp = component_from_mask(ins->mask); in vector_to_scalar_alu()
132 mir_pack_scalar_source(mir_pack_mod(ins, 0, true), !half_0, ins->swizzle[0][comp]), in vector_to_scalar_alu()
133 mir_pack_scalar_source(mir_pack_mod(ins, 1, true), !half_1, ins->swizzle[1][comp]) in vector_to_scalar_alu()
155 if (ins->has_inline_constant) { in vector_to_scalar_alu()
157 int lower_11 = ins->inline_constant & ((1 << 12) - 1); in vector_to_scalar_alu()
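
vector_to_scalar_alu() leans on the fact that a scalar op writes exactly one component, so component_from_mask() only has to find the written bit. A sketch of that extraction, assuming the single-bit invariant holds (the real helper simply returns the first set bit):

#include <assert.h>

/* Sketch of component_from_mask(): with one component written, the
 * component index is the position of the mask's set bit. */
static unsigned
component_from_mask_sketch(unsigned mask)
{
        assert(mask != 0 && (mask & (mask - 1)) == 0);
        unsigned c = 0;
        while (!(mask & 1)) {
                mask >>= 1;
                ++c;
        }
        return c;
}
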
197 mir_pack_mask_alu(midgard_instruction *ins, midgard_vector_alu *alu) in mir_pack_mask_alu() argument
199 unsigned effective = ins->mask; in mir_pack_mask_alu()
205 unsigned inst_size = max_bitsize_for_alu(ins); in mir_pack_mask_alu()
206 signed upper_shift = mir_upper_override(ins, inst_size); in mir_pack_mask_alu()
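
mir_pack_mask_alu() starts from ins->mask and, when mir_upper_override() reports a non-negative shift (the instruction writes only the upper half of its destination at a reduced size), renormalizes the mask so the written components start at bit 0. A toy version of just that adjustment, with the shrink-mode bookkeeping elided:

/* Toy mask/override adjustment as in mir_pack_mask_alu() above. */
static unsigned
adjust_mask_for_override(unsigned mask, signed upper_shift)
{
        if (upper_shift >= 0)
                mask >>= upper_shift;
        return mask;
}
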
325 mir_pack_vector_srcs(midgard_instruction *ins, midgard_vector_alu *alu) in mir_pack_vector_srcs() argument
327 bool channeled = GET_CHANNEL_COUNT(alu_opcode_props[ins->op].props); in mir_pack_vector_srcs()
329 unsigned base_size = max_bitsize_for_alu(ins); in mir_pack_vector_srcs()
332 if (ins->has_inline_constant && (i == 1)) in mir_pack_vector_srcs()
335 if (ins->src[i] == ~0) in mir_pack_vector_srcs()
338 unsigned sz = nir_alu_type_get_type_size(ins->src_types[i]); in mir_pack_vector_srcs()
342 if (sz == 8 && base_size == 8 && ins->op == midgard_alu_op_imov) { in mir_pack_vector_srcs()
343 ins->outmod = midgard_outmod_keeplo; in mir_pack_vector_srcs()
348 unsigned swizzle = mir_pack_swizzle(ins->mask, ins->swizzle[i], in mir_pack_vector_srcs()
353 .mod = mir_pack_mod(ins, i, false), in mir_pack_vector_srcs()
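
The mir_pack_vector_srcs() loop skips two kinds of slot before packing anything: source 1 when it is occupied by an inline constant, and any source equal to ~0, which marks an unused operand. The skeleton, with a simplified stand-in type and the actual packing elided:

#include <stdbool.h>

/* Loop skeleton of mir_pack_vector_srcs(); stand-in types. */
struct toy_ins {
        bool has_inline_constant;
        unsigned src[2];        /* ~0u marks an unused slot */
};

static void
pack_srcs_skeleton(const struct toy_ins *ins)
{
        for (unsigned i = 0; i < 2; ++i) {
                if (ins->has_inline_constant && i == 1)
                        continue;       /* slot holds the constant */
                if (ins->src[i] == ~0u)
                        continue;       /* unused source */
                /* ... pack swizzle and modifiers for source i ... */
        }
}
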
368 mir_pack_swizzle_ldst(midgard_instruction *ins) in mir_pack_swizzle_ldst() argument
370 unsigned compsz = OP_IS_STORE(ins->op) ? in mir_pack_swizzle_ldst()
371 nir_alu_type_get_type_size(ins->src_types[0]) : in mir_pack_swizzle_ldst()
372 nir_alu_type_get_type_size(ins->dest_type); in mir_pack_swizzle_ldst()
377 unsigned v = ins->swizzle[0][c]; in mir_pack_swizzle_ldst()
384 ins->load_store.swizzle |= (v / step) << (2 * (c / step)); in mir_pack_swizzle_ldst()
386 ins->load_store.swizzle |= ((v / step) << (4 * c)) | in mir_pack_swizzle_ldst()
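
For 32-bit components, the load/store swizzle packing reduces to one 2-bit lane selector per component: with step == 1, the (v / step) << (2 * (c / step)) term above is just v << (2 * c). A self-contained model of that case:

#include <stdint.h>

/* 32-bit load/store swizzle: one 2-bit selector per component,
 * little-endian within the byte. Models the compsz == 32 case above. */
static uint8_t
pack_swizzle32(const unsigned swizzle[4])
{
        uint8_t packed = 0;
        for (unsigned c = 0; c < 4; ++c)
                packed |= (uint8_t)((swizzle[c] & 3) << (2 * c));
        return packed;
}

/* Identity swizzle xyzw = {0,1,2,3} packs to 0xE4. */
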
394 mir_pack_swizzle_tex(midgard_instruction *ins) in mir_pack_swizzle_tex() argument
400 unsigned v = ins->swizzle[i][c]; in mir_pack_swizzle_tex()
409 ins->texture.swizzle = packed; in mir_pack_swizzle_tex()
411 ins->texture.in_reg_swizzle = packed; in mir_pack_swizzle_tex()
436 midgard_instruction *ins = bundle->instructions[i]; in mir_can_run_ooo() local
438 mir_foreach_src(ins, s) { in mir_can_run_ooo()
439 if (ins->src[s] == dependency) in mir_can_run_ooo()
449 mir_pack_tex_ooo(midgard_block *block, midgard_bundle *bundle, midgard_instruction *ins) in mir_pack_tex_ooo() argument
454 if (!mir_can_run_ooo(block, bundle + count + 1, ins->dest)) in mir_pack_tex_ooo()
458 ins->texture.out_of_order = count; in mir_pack_tex_ooo()
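
mir_pack_tex_ooo() counts how many of the bundles following the texture op are independent of its result, using mir_can_run_ooo() as the per-bundle check against ins->dest. A simplified model; the cap of 3 is an assumption about the encodable range of the out_of_order field, and the types are stand-ins:

#include <stdbool.h>

/* Toy out-of-order distance: count leading bundles that do not read
 * the texture destination. */
struct toy_bundle {
        unsigned nsrcs;
        unsigned srcs[8];
};

static bool
bundle_reads(const struct toy_bundle *b, unsigned reg)
{
        for (unsigned i = 0; i < b->nsrcs; ++i)
                if (b->srcs[i] == reg)
                        return true;
        return false;
}

static unsigned
ooo_distance(const struct toy_bundle *following, unsigned n, unsigned dest)
{
        unsigned count = 0;
        while (count < 3 && count < n &&
               !bundle_reads(&following[count], dest))
                ++count;
        return count;
}
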
469 midgard_pack_common_store_mask(midgard_instruction *ins) { in midgard_pack_common_store_mask() argument
470 ASSERTED unsigned comp_sz = nir_alu_type_get_type_size(ins->src_types[0]); in midgard_pack_common_store_mask()
471 unsigned bytemask = mir_bytemask(ins); in midgard_pack_common_store_mask()
474 switch (ins->op) { in midgard_pack_common_store_mask()
476 return mir_bytemask(ins) & 1; in midgard_pack_common_store_mask()
478 return mir_bytemask(ins) & 3; in midgard_pack_common_store_mask()
480 return mir_bytemask(ins); in midgard_pack_common_store_mask()
501 mir_pack_ldst_mask(midgard_instruction *ins) in mir_pack_ldst_mask() argument
503 unsigned sz = nir_alu_type_get_type_size(ins->dest_type); in mir_pack_ldst_mask()
504 unsigned packed = ins->mask; in mir_pack_ldst_mask()
506 if (OP_IS_COMMON_STORE(ins->op)) { in mir_pack_ldst_mask()
507 packed = midgard_pack_common_store_mask(ins); in mir_pack_ldst_mask()
510 packed = ((ins->mask & 0x2) ? (0x8 | 0x4) : 0) | in mir_pack_ldst_mask()
511 ((ins->mask & 0x1) ? (0x2 | 0x1) : 0); in mir_pack_ldst_mask()
518 unsigned submask = (ins->mask >> (i * comps_per_32b)) & in mir_pack_ldst_mask()
530 ins->load_store.mask = packed; in mir_pack_ldst_mask()
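
Two mask shapes meet in mir_pack_ldst_mask(): common stores keep a byte mask narrowed to the store width (the & 1 and & 3 cases from midgard_pack_common_store_mask() above), while 64-bit types fan each component bit out to two 32-bit lanes. The 64-bit expansion, standalone:

/* 64-bit mask expansion from mir_pack_ldst_mask() above: the packed
 * mask is expressed in 32-bit lanes, so each 64-bit component bit
 * covers two of them. */
static unsigned
expand_mask64(unsigned mask)
{
        return ((mask & 0x2) ? (0x8 | 0x4) : 0) |
               ((mask & 0x1) ? (0x2 | 0x1) : 0);
}
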
534 mir_lower_inverts(midgard_instruction *ins) in mir_lower_inverts() argument
537 ins->src_invert[0], in mir_lower_inverts()
538 ins->src_invert[1], in mir_lower_inverts()
539 ins->src_invert[2] in mir_lower_inverts()
542 switch (ins->op) { in mir_lower_inverts()
548 ins->op = midgard_alu_op_inor; in mir_lower_inverts()
550 ins->op = midgard_alu_op_iandnot; in mir_lower_inverts()
558 ins->op = midgard_alu_op_inand; in mir_lower_inverts()
560 ins->op = midgard_alu_op_iornot; in mir_lower_inverts()
569 ins->op = midgard_alu_op_inxor; in mir_lower_inverts()
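
The opcode rewrites in mir_lower_inverts() are De Morgan's laws folded into the instruction set: iand with both sources inverted becomes inor, ior with both inverted becomes inand, a single inverted second source maps to iandnot or iornot, and an inverted xor operand flips the whole result into inxor. The identities are easy to machine-check:

#include <assert.h>
#include <stdint.h>

/* Exhaustive check (over a small range) of the identities behind
 * mir_lower_inverts(). */
int
main(void)
{
        for (uint32_t a = 0; a < 16; ++a) {
                for (uint32_t b = 0; b < 16; ++b) {
                        assert((~a & ~b) == ~(a | b));  /* iand(~a,~b) -> inor  */
                        assert((~a | ~b) == ~(a & b));  /* ior(~a,~b)  -> inand */
                        assert((~a ^  b) == ~(a ^ b));  /* ixor(~a,b)  -> inxor */
                }
        }
        return 0;
}
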
581 mir_lower_roundmode(midgard_instruction *ins) in mir_lower_roundmode() argument
583 if (alu_opcode_props[ins->op].props & MIDGARD_ROUNDS) { in mir_lower_roundmode()
584 assert(ins->roundmode <= 0x3); in mir_lower_roundmode()
585 ins->op += ins->roundmode; in mir_lower_roundmode()
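
The ins->op += ins->roundmode trick in mir_lower_roundmode() only works if every MIDGARD_ROUNDS opcode has its four rounding variants laid out contiguously, in roundmode order, in the opcode space. A toy enum making the invariant visible; the values are illustrative, not Midgard's real opcode numbering:

#include <assert.h>

/* Contiguous rounding variants, so base + roundmode selects the op. */
enum toy_op {
        TOY_FROUND = 0x10,      /* base op, roundmode 0 */
        TOY_FROUND_1,           /* roundmode 1 */
        TOY_FROUND_2,           /* roundmode 2 */
        TOY_FROUND_3,           /* roundmode 3 */
};

static enum toy_op
apply_roundmode(enum toy_op base, unsigned roundmode)
{
        assert(roundmode <= 0x3);
        return (enum toy_op)(base + roundmode);
}
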
590 load_store_from_instr(midgard_instruction *ins) in load_store_from_instr() argument
592 midgard_load_store_word ldst = ins->load_store; in load_store_from_instr()
593 ldst.op = ins->op; in load_store_from_instr()
596 ldst.reg = SSA_REG_FROM_FIXED(ins->src[0]) & 1; in load_store_from_instr()
598 ldst.reg = SSA_REG_FROM_FIXED(ins->dest); in load_store_from_instr()
604 if (OP_IS_ATOMIC(ins->op)) { in load_store_from_instr()
606 ldst.swizzle |= ins->swizzle[3][0] & 3; in load_store_from_instr()
607 ldst.swizzle |= (SSA_REG_FROM_FIXED(ins->src[3]) & 1 ? 1 : 0) << 2; in load_store_from_instr()
610 if (ins->src[1] != ~0) { in load_store_from_instr()
611 ldst.arg_reg = SSA_REG_FROM_FIXED(ins->src[1]) - REGISTER_LDST_BASE; in load_store_from_instr()
612 unsigned sz = nir_alu_type_get_type_size(ins->src_types[1]); in load_store_from_instr()
613 ldst.arg_comp = midgard_ldst_comp(ldst.arg_reg, ins->swizzle[1][0], sz); in load_store_from_instr()
616 if (ins->src[2] != ~0) { in load_store_from_instr()
617 ldst.index_reg = SSA_REG_FROM_FIXED(ins->src[2]) - REGISTER_LDST_BASE; in load_store_from_instr()
618 unsigned sz = nir_alu_type_get_type_size(ins->src_types[2]); in load_store_from_instr()
619 ldst.index_comp = midgard_ldst_comp(ldst.index_reg, ins->swizzle[2][0], sz); in load_store_from_instr()
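
load_store_from_instr() picks the value register from src[0] for stores and from dest for loads, then encodes the optional address operands (src[1], src[2]) relative to REGISTER_LDST_BASE. A reduced model of the value-register selection; note the real store path additionally masks the register to a single bit, selecting within a fixed register pair (which pair is an assumption here), and the types below are stand-ins:

#include <stdbool.h>

/* Reduced register selection from load_store_from_instr(): stores
 * read their data from src[0], loads write dest. */
struct toy_ldst_ins {
        bool is_store;
        unsigned src0, dest;
};

static unsigned
ldst_value_reg(const struct toy_ldst_ins *ins)
{
        return ins->is_store ? ins->src0 : ins->dest;
}
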
626 texture_word_from_instr(midgard_instruction *ins) in texture_word_from_instr() argument
628 midgard_texture_word tex = ins->texture; in texture_word_from_instr()
629 tex.op = ins->op; in texture_word_from_instr()
631 unsigned src1 = ins->src[1] == ~0 ? REGISTER_UNUSED : SSA_REG_FROM_FIXED(ins->src[1]); in texture_word_from_instr()
634 unsigned dest = ins->dest == ~0 ? REGISTER_UNUSED : SSA_REG_FROM_FIXED(ins->dest); in texture_word_from_instr()
637 if (ins->src[2] != ~0) { in texture_word_from_instr()
639 .select = SSA_REG_FROM_FIXED(ins->src[2]) & 1, in texture_word_from_instr()
641 .component = ins->swizzle[2][0] in texture_word_from_instr()
648 if (ins->src[3] != ~0) { in texture_word_from_instr()
649 unsigned x = ins->swizzle[3][0]; in texture_word_from_instr()
656 unsigned offset_reg = SSA_REG_FROM_FIXED(ins->src[3]); in texture_word_from_instr()
670 vector_alu_from_instr(midgard_instruction *ins) in vector_alu_from_instr() argument
673 .op = ins->op, in vector_alu_from_instr()
674 .outmod = ins->outmod, in vector_alu_from_instr()
675 .reg_mode = reg_mode_for_bitsize(max_bitsize_for_alu(ins)) in vector_alu_from_instr()
678 if (ins->has_inline_constant) { in vector_alu_from_instr()
682 int lower_11 = ins->inline_constant & ((1 << 12) - 1); in vector_alu_from_instr()
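
The inline constant is wider than any single encoding field, so it is split: vector_alu_from_instr() folds the low bits (lower_11 above) into the src2 source descriptor, and emit_alu_bundle() later stores the remaining high bits in the register word's src2_reg slot (the >> 11 seen further down the listing). A sketch of the split; the masks follow the listing verbatim (they overlap at bit 11) and the field placement is an assumption for illustration:

#include <stdint.h>

/* Inline-constant split as suggested by the listing: low bits into
 * the ALU word's src2 descriptor, high bits into src2_reg. */
static void
split_inline_constant(uint16_t constant, unsigned *low_bits, unsigned *src2_reg)
{
        *low_bits = constant & ((1 << 12) - 1); /* lower_11 in the listing */
        *src2_reg = constant >> 11;             /* high bits reuse src2_reg */
}
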
724 emit_branch(midgard_instruction *ins, in emit_branch() argument
731 bool is_compact = ins->unit == ALU_ENAB_BR_COMPACT; in emit_branch()
732 bool is_conditional = ins->branch.conditional; in emit_branch()
733 bool is_inverted = ins->branch.invert_conditional; in emit_branch()
734 bool is_discard = ins->branch.target_type == TARGET_DISCARD; in emit_branch()
735 bool is_tilebuf_wait = ins->branch.target_type == TARGET_TILEBUF_WAIT; in emit_branch()
737 bool is_writeout = ins->writeout; in emit_branch()
740 int target_number = ins->branch.target_block; in emit_branch()
845 midgard_instruction *ins = bundle->instructions[i]; in emit_alu_bundle() local
848 if (ins->compact_branch) continue; in emit_alu_bundle()
851 if (ins->has_inline_constant) in emit_alu_bundle()
852 src2_reg = ins->inline_constant >> 11; in emit_alu_bundle()
853 else if (ins->src[1] != ~0) in emit_alu_bundle()
854 src2_reg = SSA_REG_FROM_FIXED(ins->src[1]); in emit_alu_bundle()
859 .src1_reg = (ins->src[0] == ~0 ? in emit_alu_bundle()
861 SSA_REG_FROM_FIXED(ins->src[0])), in emit_alu_bundle()
863 .src2_imm = ins->has_inline_constant, in emit_alu_bundle()
864 .out_reg = (ins->dest == ~0 ? in emit_alu_bundle()
866 SSA_REG_FROM_FIXED(ins->dest)), in emit_alu_bundle()
874 midgard_instruction *ins = bundle->instructions[i]; in emit_alu_bundle() local
876 if (!ins->compact_branch) { in emit_alu_bundle()
877 mir_lower_inverts(ins); in emit_alu_bundle()
878 mir_lower_roundmode(ins); in emit_alu_bundle()
881 if (midgard_is_branch_unit(ins->unit)) { in emit_alu_bundle()
882 emit_branch(ins, ctx, block, bundle, emission); in emit_alu_bundle()
883 } else if (ins->unit & UNITS_ANY_VECTOR) { in emit_alu_bundle()
884 midgard_vector_alu source = vector_alu_from_instr(ins); in emit_alu_bundle()
885 mir_pack_mask_alu(ins, &source); in emit_alu_bundle()
886 mir_pack_vector_srcs(ins, &source); in emit_alu_bundle()
890 … midgard_scalar_alu source = vector_to_scalar_alu(vector_alu_from_instr(ins), ins); in emit_alu_bundle()
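
emit_alu_bundle()'s emission loop lowers per-instruction sugar first (inverts, round modes), then routes each instruction to one of three encoders by execution unit: branch, vector ALU, or scalar ALU (a scalar word is derived from the vector encoding via vector_to_scalar_alu()). The dispatch, reduced to stand-in types and stub encoders:

#include <stdbool.h>

/* Dispatch skeleton of emit_alu_bundle(); enum values and helpers are
 * stand-ins, not Mesa's. */
enum toy_unit { TOY_UNIT_BRANCH, TOY_UNIT_VECTOR, TOY_UNIT_SCALAR };

struct toy_alu_ins {
        bool compact_branch;
        enum toy_unit unit;
};

static void toy_lower(struct toy_alu_ins *i)       { (void)i; /* inverts, roundmode */ }
static void toy_emit_branch(struct toy_alu_ins *i) { (void)i; }
static void toy_emit_vector(struct toy_alu_ins *i) { (void)i; /* mask + srcs packed here */ }
static void toy_emit_scalar(struct toy_alu_ins *i) { (void)i; /* vector word narrowed */ }

static void
toy_emit_one(struct toy_alu_ins *ins)
{
        if (!ins->compact_branch)
                toy_lower(ins);

        if (ins->unit == TOY_UNIT_BRANCH)
                toy_emit_branch(ins);
        else if (ins->unit == TOY_UNIT_VECTOR)
                toy_emit_vector(ins);
        else
                toy_emit_scalar(ins);
}
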
913 mir_ldst_pack_offset(midgard_instruction *ins, int offset) in mir_ldst_pack_offset() argument
916 assert(!OP_IS_REG2REG_LDST(ins->op) || in mir_ldst_pack_offset()
917 ins->op == midgard_op_lea || in mir_ldst_pack_offset()
918 ins->op == midgard_op_lea_image); in mir_ldst_pack_offset()
920 if (OP_IS_UBO_READ(ins->op)) in mir_ldst_pack_offset()
921 ins->load_store.signed_offset |= PACK_LDST_UBO_OFS(offset); in mir_ldst_pack_offset()
922 else if (OP_IS_IMAGE(ins->op)) in mir_ldst_pack_offset()
923 ins->load_store.signed_offset |= PACK_LDST_ATTRIB_OFS(offset); in mir_ldst_pack_offset()
924 else if (OP_IS_SPECIAL(ins->op)) in mir_ldst_pack_offset()
925 ins->load_store.signed_offset |= PACK_LDST_SELECTOR_OFS(offset); in mir_ldst_pack_offset()
927 ins->load_store.signed_offset |= PACK_LDST_MEM_OFS(offset); in mir_ldst_pack_offset()
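
mir_ldst_pack_offset() reuses the one signed_offset field, but the placement differs by op class: UBO reads, image ops, "special" loads, and plain memory accesses each go through their own PACK_LDST_* macro. A toy dispatcher mirroring the structure; the shift amounts are placeholders, not the real PACK_LDST_* encodings:

/* Per-class offset placement as in mir_ldst_pack_offset(). */
enum toy_ldst_class { TOY_UBO, TOY_IMAGE, TOY_SPECIAL, TOY_MEM };

static unsigned
toy_pack_ldst_offset(enum toy_ldst_class cls, int offset)
{
        switch (cls) {
        case TOY_UBO:     return (unsigned)offset << 3; /* PACK_LDST_UBO_OFS */
        case TOY_IMAGE:   return (unsigned)offset << 2; /* PACK_LDST_ATTRIB_OFS */
        case TOY_SPECIAL: return (unsigned)offset << 1; /* PACK_LDST_SELECTOR_OFS */
        default:          return (unsigned)offset;      /* PACK_LDST_MEM_OFS */
        }
}
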
976 midgard_instruction *ins = bundle->instructions[i]; in emit_binary_bundle() local
977 mir_pack_ldst_mask(ins); in emit_binary_bundle()
980 if (!OP_IS_ATOMIC(ins->op)) in emit_binary_bundle()
981 mir_pack_swizzle_ldst(ins); in emit_binary_bundle()
984 unsigned offset = ins->constants.u32[0]; in emit_binary_bundle()
986 mir_ldst_pack_offset(ins, offset); in emit_binary_bundle()
1018 midgard_instruction *ins = bundle->instructions[0]; in emit_binary_bundle() local
1020 ins->texture.type = bundle->tag; in emit_binary_bundle()
1021 ins->texture.next_type = next_tag; in emit_binary_bundle()
1022 ins->texture.exec = MIDGARD_PARTIAL_EXECUTION_NONE; /* default */ in emit_binary_bundle()
1025 if (ins->op == midgard_tex_op_barrier) { in emit_binary_bundle()
1026 ins->texture.op = ins->op; in emit_binary_bundle()
1027 util_dynarray_append(emission, midgard_texture_word, ins->texture); in emit_binary_bundle()
1031 signed override = mir_upper_override(ins, 32); in emit_binary_bundle()
1033 ins->texture.mask = override > 0 ? in emit_binary_bundle()
1034 ins->mask >> override : in emit_binary_bundle()
1035 ins->mask; in emit_binary_bundle()
1037 mir_pack_swizzle_tex(ins); in emit_binary_bundle()
1040 mir_pack_tex_ooo(block, bundle, ins); in emit_binary_bundle()
1042 unsigned osz = nir_alu_type_get_type_size(ins->dest_type); in emit_binary_bundle()
1043 unsigned isz = nir_alu_type_get_type_size(ins->src_types[1]); in emit_binary_bundle()
1048 ins->texture.out_full = (osz == 32); in emit_binary_bundle()
1049 ins->texture.out_upper = override > 0; in emit_binary_bundle()
1050 ins->texture.in_reg_full = (isz == 32); in emit_binary_bundle()
1051 ins->texture.sampler_type = midgard_sampler_type(ins->dest_type); in emit_binary_bundle()
1052 ins->texture.outmod = ins->outmod; in emit_binary_bundle()
1054 if (mir_op_computes_derivatives(ctx->stage, ins->op)) { in emit_binary_bundle()
1055 if (ins->helper_terminate) in emit_binary_bundle()
1056 ins->texture.exec = MIDGARD_PARTIAL_EXECUTION_KILL; in emit_binary_bundle()
1057 else if (!ins->helper_execute) in emit_binary_bundle()
1058 ins->texture.exec = MIDGARD_PARTIAL_EXECUTION_SKIP; in emit_binary_bundle()
1061 midgard_texture_word texture = texture_word_from_instr(ins); in emit_binary_bundle()
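
The partial-execution selection near the end of the texture path follows a simple rule: only derivative-computing texture ops care about helper invocations; an instruction that terminates helpers gets KILL, one that does not need helper execution gets SKIP, and everything else keeps the NONE default. As a standalone function, with stand-in enum values for the MIDGARD_PARTIAL_EXECUTION_* constants:

#include <stdbool.h>

/* Partial-execution selection from the texture bundle path above. */
enum toy_exec { TOY_EXEC_NONE, TOY_EXEC_SKIP, TOY_EXEC_KILL };

static enum toy_exec
tex_partial_execution(bool computes_derivatives,
                      bool helper_terminate, bool helper_execute)
{
        if (!computes_derivatives)
                return TOY_EXEC_NONE;
        if (helper_terminate)
                return TOY_EXEC_KILL;
        if (!helper_execute)
                return TOY_EXEC_SKIP;
        return TOY_EXEC_NONE;
}
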