diff --git a/selfdrive/modeld/thneed/debug/decompiler/disasm-a3xx.c b/selfdrive/modeld/thneed/debug/decompiler/disasm-a3xx.c
new file mode 100644
index 0000000000..1b40fb7fc0
--- /dev/null
+++ b/selfdrive/modeld/thneed/debug/decompiler/disasm-a3xx.c
@@ -0,0 +1,1426 @@
+/*
+ * Copyright (c) 2013 Rob Clark <robdclark@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <string.h>
+#include <assert.h>
+
+//#include <util/u_debug.h>
+
+#include "util/macros.h"
+#include "instr-a3xx.h"
+
+/* bitmask of debug flags */
+enum debug_t {
+	PRINT_RAW      = 0x1, /* dump raw hexdump */
+	PRINT_VERBOSE  = 0x2,
+	EXPAND_REPEAT  = 0x4,
+};
+
+static enum debug_t debug = PRINT_RAW | PRINT_VERBOSE | EXPAND_REPEAT;
+
+static const char *levels[] = {
+		"",
+		"\t",
+		"\t\t",
+		"\t\t\t",
+		"\t\t\t\t",
+		"\t\t\t\t\t",
+		"\t\t\t\t\t\t",
+		"\t\t\t\t\t\t\t",
+		"\t\t\t\t\t\t\t\t",
+		"\t\t\t\t\t\t\t\t\t",
+		"x",
+		"x",
+		"x",
+		"x",
+		"x",
+		"x",
+};
+
+static const char *component = "xyzw";
+
+static const char *type[] = {
+		[TYPE_F16] = "f16",
+		[TYPE_F32] = "f32",
+		[TYPE_U16] = "u16",
+		[TYPE_U32] = "u32",
+		[TYPE_S16] = "s16",
+		[TYPE_S32] = "s32",
+		[TYPE_U8]  = "u8",
+		[TYPE_S8]  = "s8",
+};
+
+struct disasm_ctx {
+	FILE *out;
+	int level;
+	unsigned gpu_id;
+
+	/* current instruction repeat flag: */
+	unsigned repeat;
+	/* current instruction repeat indx/offset (for --expand): */
+	unsigned repeatidx;
+
+	unsigned instructions;
+};
+
+static const char *float_imms[] = {
+	"0.0",
+	"0.5",
+	"1.0",
+	"2.0",
+	"e",
+	"pi",
+	"1/pi",
+	"1/log2(e)",
+	"log2(e)",
+	"1/log2(10)",
+	"log2(10)",
+	"4.0",
+};
+
+static void print_reg(struct disasm_ctx *ctx, reg_t reg, bool full,
+		bool is_float, bool r,
+		bool c, bool im, bool neg, bool abs, bool addr_rel)
+{
+	const char type = c ? 'c' : 'r';
+
+	// XXX I prefer - and || for neg/abs, but preserving format used
+	// by libllvm-a3xx for easy diffing..
+
+	if (abs && neg)
+		fprintf(ctx->out, "(absneg)");
+	else if (neg)
+		fprintf(ctx->out, "(neg)");
+	else if (abs)
+		fprintf(ctx->out, "(abs)");
+
+	if (r)
+		fprintf(ctx->out, "(r)");
+
+	if (im) {
+		if (is_float && full && reg.iim_val < ARRAY_SIZE(float_imms)) {
+			fprintf(ctx->out, "(%s)", float_imms[reg.iim_val]);
+		} else {
+			fprintf(ctx->out, "%d", reg.iim_val);
+		}
+	} else if (addr_rel) {
+		/* I would just use %+d but trying to make it diff'able with
+		 * libllvm-a3xx...
+		 */
+		if (reg.iim_val < 0)
+			fprintf(ctx->out, "%s%c<a0.x - %d>", full ?
"" : "h", type, -reg.iim_val); + else if (reg.iim_val > 0) + fprintf(ctx->out, "%s%c", full ? "" : "h", type, reg.iim_val); + else + fprintf(ctx->out, "%s%c", full ? "" : "h", type); + } else if ((reg.num == REG_A0) && !c) { + /* This matches libllvm output, the second (scalar) address register + * seems to be called a1.x instead of a0.y. + */ + fprintf(ctx->out, "a%d.x", reg.comp); + } else if ((reg.num == REG_P0) && !c) { + fprintf(ctx->out, "p0.%c", component[reg.comp]); + } else { + fprintf(ctx->out, "%s%c%d.%c", full ? "" : "h", type, reg.num, component[reg.comp]); + } +} + +static unsigned regidx(reg_t reg) +{ + return (4 * reg.num) + reg.comp; +} + +static reg_t idxreg(unsigned idx) +{ + return (reg_t){ + .comp = idx & 0x3, + .num = idx >> 2, + }; +} + +static void print_reg_dst(struct disasm_ctx *ctx, reg_t reg, bool full, bool addr_rel) +{ + reg = idxreg(regidx(reg) + ctx->repeatidx); + print_reg(ctx, reg, full, false, false, false, false, false, false, addr_rel); +} + +/* TODO switch to using reginfo struct everywhere, since more readable + * than passing a bunch of bools to print_reg_src + */ + +struct reginfo { + reg_t reg; + bool full; + bool r; + bool c; + bool f; /* src reg is interpreted as float, used for printing immediates */ + bool im; + bool neg; + bool abs; + bool addr_rel; +}; + +static void print_src(struct disasm_ctx *ctx, struct reginfo *info) +{ + reg_t reg = info->reg; + + if (info->r) + reg = idxreg(regidx(info->reg) + ctx->repeatidx); + + print_reg(ctx, reg, info->full, info->f, info->r, info->c, info->im, + info->neg, info->abs, info->addr_rel); +} + +//static void print_dst(struct disasm_ctx *ctx, struct reginfo *info) +//{ +// print_reg_dst(ctx, info->reg, info->full, info->addr_rel); +//} + +static void print_instr_cat0(struct disasm_ctx *ctx, instr_t *instr) +{ + static const struct { + const char *suffix; + int nsrc; + bool idx; + } brinfo[7] = { + [BRANCH_PLAIN] = { "r", 1, false }, + [BRANCH_OR] = { "rao", 2, false }, + [BRANCH_AND] = { "raa", 2, false }, + [BRANCH_CONST] = { "rac", 0, true }, + [BRANCH_ANY] = { "any", 1, false }, + [BRANCH_ALL] = { "all", 1, false }, + [BRANCH_X] = { "rax", 0, false }, + }; + instr_cat0_t *cat0 = &instr->cat0; + + switch (instr_opc(instr, ctx->gpu_id)) { + case OPC_KILL: + case OPC_PREDT: + case OPC_PREDF: + fprintf(ctx->out, " %sp0.%c", cat0->inv0 ? "!" : "", + component[cat0->comp0]); + break; + case OPC_B: + fprintf(ctx->out, "%s", brinfo[cat0->brtype].suffix); + if (brinfo[cat0->brtype].idx) { + fprintf(ctx->out, ".%u", cat0->idx); + } + if (brinfo[cat0->brtype].nsrc >= 1) { + fprintf(ctx->out, " %sp0.%c,", cat0->inv0 ? "!" : "", + component[cat0->comp0]); + } + if (brinfo[cat0->brtype].nsrc >= 2) { + fprintf(ctx->out, " %sp0.%c,", cat0->inv1 ? "!" 
: "", + component[cat0->comp1]); + } + fprintf(ctx->out, " #%d", cat0->a3xx.immed); + break; + case OPC_JUMP: + case OPC_CALL: + case OPC_BKT: + case OPC_GETONE: + case OPC_SHPS: + fprintf(ctx->out, " #%d", cat0->a3xx.immed); + break; + } + + if ((debug & PRINT_VERBOSE) && (cat0->dummy3|cat0->dummy4)) + fprintf(ctx->out, "\t{0: %x,%x}", cat0->dummy3, cat0->dummy4); +} + +static void print_instr_cat1(struct disasm_ctx *ctx, instr_t *instr) +{ + instr_cat1_t *cat1 = &instr->cat1; + + if (cat1->ul) + fprintf(ctx->out, "(ul)"); + + if (cat1->src_type == cat1->dst_type) { + if ((cat1->src_type == TYPE_S16) && (((reg_t)cat1->dst).num == REG_A0)) { + /* special case (nmemonic?): */ + fprintf(ctx->out, "mova"); + } else { + fprintf(ctx->out, "mov.%s%s", type[cat1->src_type], type[cat1->dst_type]); + } + } else { + fprintf(ctx->out, "cov.%s%s", type[cat1->src_type], type[cat1->dst_type]); + } + + fprintf(ctx->out, " "); + + if (cat1->even) + fprintf(ctx->out, "(even)"); + + if (cat1->pos_inf) + fprintf(ctx->out, "(pos_infinity)"); + + print_reg_dst(ctx, (reg_t)(cat1->dst), type_size(cat1->dst_type) == 32, + cat1->dst_rel); + + fprintf(ctx->out, ", "); + + /* ugg, have to special case this.. vs print_reg().. */ + if (cat1->src_im) { + if (type_float(cat1->src_type)) + fprintf(ctx->out, "(%f)", cat1->fim_val); + else if (type_uint(cat1->src_type)) + fprintf(ctx->out, "0x%08x", cat1->uim_val); + else + fprintf(ctx->out, "%d", cat1->iim_val); + } else if (cat1->src_rel && !cat1->src_c) { + /* I would just use %+d but trying to make it diff'able with + * libllvm-a3xx... + */ + char type = cat1->src_rel_c ? 'c' : 'r'; + const char *full = (type_size(cat1->src_type) == 32) ? "" : "h"; + if (cat1->off < 0) + fprintf(ctx->out, "%s%c", full, type, -cat1->off); + else if (cat1->off > 0) + fprintf(ctx->out, "%s%c", full, type, cat1->off); + else + fprintf(ctx->out, "%s%c", full, type); + } else { + struct reginfo src = { + .reg = (reg_t)cat1->src, + .full = type_size(cat1->src_type) == 32, + .r = cat1->src_r, + .c = cat1->src_c, + .im = cat1->src_im, + }; + print_src(ctx, &src); + } + + if ((debug & PRINT_VERBOSE) && (cat1->must_be_0)) + fprintf(ctx->out, "\t{1: %x}", cat1->must_be_0); +} + +static void print_instr_cat2(struct disasm_ctx *ctx, instr_t *instr) +{ + instr_cat2_t *cat2 = &instr->cat2; + int opc = _OPC(2, cat2->opc); + static const char *cond[] = { + "lt", + "le", + "gt", + "ge", + "eq", + "ne", + "?6?", + }; + + switch (opc) { + case OPC_CMPS_F: + case OPC_CMPS_U: + case OPC_CMPS_S: + case OPC_CMPV_F: + case OPC_CMPV_U: + case OPC_CMPV_S: + fprintf(ctx->out, ".%s", cond[cat2->cond]); + break; + } + + fprintf(ctx->out, " "); + if (cat2->ei) + fprintf(ctx->out, "(ei)"); + print_reg_dst(ctx, (reg_t)(cat2->dst), cat2->full ^ cat2->dst_half, false); + fprintf(ctx->out, ", "); + + struct reginfo src1 = { + .full = cat2->full, + .r = cat2->repeat ? cat2->src1_r : 0, + .f = is_cat2_float(opc), + .im = cat2->src1_im, + .abs = cat2->src1_abs, + .neg = cat2->src1_neg, + }; + + if (cat2->c1.src1_c) { + src1.reg = (reg_t)(cat2->c1.src1); + src1.c = true; + } else if (cat2->rel1.src1_rel) { + src1.reg = (reg_t)(cat2->rel1.src1); + src1.c = cat2->rel1.src1_c; + src1.addr_rel = true; + } else { + src1.reg = (reg_t)(cat2->src1); + } + print_src(ctx, &src1); + + struct reginfo src2 = { + .r = cat2->repeat ? 
cat2->src2_r : 0, + .full = cat2->full, + .f = is_cat2_float(opc), + .abs = cat2->src2_abs, + .neg = cat2->src2_neg, + .im = cat2->src2_im, + }; + switch (opc) { + case OPC_ABSNEG_F: + case OPC_ABSNEG_S: + case OPC_CLZ_B: + case OPC_CLZ_S: + case OPC_SIGN_F: + case OPC_FLOOR_F: + case OPC_CEIL_F: + case OPC_RNDNE_F: + case OPC_RNDAZ_F: + case OPC_TRUNC_F: + case OPC_NOT_B: + case OPC_BFREV_B: + case OPC_SETRM: + case OPC_CBITS_B: + /* these only have one src reg */ + break; + default: + fprintf(ctx->out, ", "); + if (cat2->c2.src2_c) { + src2.reg = (reg_t)(cat2->c2.src2); + src2.c = true; + } else if (cat2->rel2.src2_rel) { + src2.reg = (reg_t)(cat2->rel2.src2); + src2.c = cat2->rel2.src2_c; + src2.addr_rel = true; + } else { + src2.reg = (reg_t)(cat2->src2); + } + print_src(ctx, &src2); + break; + } +} + +static void print_instr_cat3(struct disasm_ctx *ctx, instr_t *instr) +{ + instr_cat3_t *cat3 = &instr->cat3; + bool full = instr_cat3_full(cat3); + + fprintf(ctx->out, " "); + print_reg_dst(ctx, (reg_t)(cat3->dst), full ^ cat3->dst_half, false); + fprintf(ctx->out, ", "); + + struct reginfo src1 = { + .r = cat3->repeat ? cat3->src1_r : 0, + .full = full, + .neg = cat3->src1_neg, + }; + if (cat3->c1.src1_c) { + src1.reg = (reg_t)(cat3->c1.src1); + src1.c = true; + } else if (cat3->rel1.src1_rel) { + src1.reg = (reg_t)(cat3->rel1.src1); + src1.c = cat3->rel1.src1_c; + src1.addr_rel = true; + } else { + src1.reg = (reg_t)(cat3->src1); + } + print_src(ctx, &src1); + + fprintf(ctx->out, ", "); + struct reginfo src2 = { + .reg = (reg_t)cat3->src2, + .full = full, + .r = cat3->repeat ? cat3->src2_r : 0, + .c = cat3->src2_c, + .neg = cat3->src2_neg, + }; + print_src(ctx, &src2); + + fprintf(ctx->out, ", "); + struct reginfo src3 = { + .r = cat3->src3_r, + .full = full, + .neg = cat3->src3_neg, + }; + if (cat3->c2.src3_c) { + src3.reg = (reg_t)(cat3->c2.src3); + src3.c = true; + } else if (cat3->rel2.src3_rel) { + src3.reg = (reg_t)(cat3->rel2.src3); + src3.c = cat3->rel2.src3_c; + src3.addr_rel = true; + } else { + src3.reg = (reg_t)(cat3->src3); + } + print_src(ctx, &src3); +} + +static void print_instr_cat4(struct disasm_ctx *ctx, instr_t *instr) +{ + instr_cat4_t *cat4 = &instr->cat4; + + fprintf(ctx->out, " "); + print_reg_dst(ctx, (reg_t)(cat4->dst), cat4->full ^ cat4->dst_half, false); + fprintf(ctx->out, ", "); + + struct reginfo src = { + .r = cat4->src_r, + .im = cat4->src_im, + .full = cat4->full, + .neg = cat4->src_neg, + .abs = cat4->src_abs, + }; + if (cat4->c.src_c) { + src.reg = (reg_t)(cat4->c.src); + src.c = true; + } else if (cat4->rel.src_rel) { + src.reg = (reg_t)(cat4->rel.src); + src.c = cat4->rel.src_c; + src.addr_rel = true; + } else { + src.reg = (reg_t)(cat4->src); + } + print_src(ctx, &src); + + if ((debug & PRINT_VERBOSE) && (cat4->dummy1|cat4->dummy2)) + fprintf(ctx->out, "\t{4: %x,%x}", cat4->dummy1, cat4->dummy2); +} + +static void print_instr_cat5(struct disasm_ctx *ctx, instr_t *instr) +{ + static const struct { + bool src1, src2, samp, tex; + } info[0x1f] = { + [opc_op(OPC_ISAM)] = { true, false, true, true, }, + [opc_op(OPC_ISAML)] = { true, true, true, true, }, + [opc_op(OPC_ISAMM)] = { true, false, true, true, }, + [opc_op(OPC_SAM)] = { true, false, true, true, }, + [opc_op(OPC_SAMB)] = { true, true, true, true, }, + [opc_op(OPC_SAML)] = { true, true, true, true, }, + [opc_op(OPC_SAMGQ)] = { true, false, true, true, }, + [opc_op(OPC_GETLOD)] = { true, false, true, true, }, + [opc_op(OPC_CONV)] = { true, true, true, true, }, + [opc_op(OPC_CONVM)] = { true, 
true, true, true, }, + [opc_op(OPC_GETSIZE)] = { true, false, false, true, }, + [opc_op(OPC_GETBUF)] = { false, false, false, true, }, + [opc_op(OPC_GETPOS)] = { true, false, false, true, }, + [opc_op(OPC_GETINFO)] = { false, false, false, true, }, + [opc_op(OPC_DSX)] = { true, false, false, false, }, + [opc_op(OPC_DSY)] = { true, false, false, false, }, + [opc_op(OPC_GATHER4R)] = { true, false, true, true, }, + [opc_op(OPC_GATHER4G)] = { true, false, true, true, }, + [opc_op(OPC_GATHER4B)] = { true, false, true, true, }, + [opc_op(OPC_GATHER4A)] = { true, false, true, true, }, + [opc_op(OPC_SAMGP0)] = { true, false, true, true, }, + [opc_op(OPC_SAMGP1)] = { true, false, true, true, }, + [opc_op(OPC_SAMGP2)] = { true, false, true, true, }, + [opc_op(OPC_SAMGP3)] = { true, false, true, true, }, + [opc_op(OPC_DSXPP_1)] = { true, false, false, false, }, + [opc_op(OPC_DSYPP_1)] = { true, false, false, false, }, + [opc_op(OPC_RGETPOS)] = { true, false, false, false, }, + [opc_op(OPC_RGETINFO)] = { false, false, false, false, }, + }; + + static const struct { + bool indirect; + bool bindless; + bool use_a1; + bool uniform; + } desc_features[8] = { + [CAT5_NONUNIFORM] = { .indirect = true, }, + [CAT5_UNIFORM] = { .indirect = true, .uniform = true, }, + [CAT5_BINDLESS_IMM] = { .bindless = true, }, + [CAT5_BINDLESS_UNIFORM] = { + .bindless = true, + .indirect = true, + .uniform = true, + }, + [CAT5_BINDLESS_NONUNIFORM] = { + .bindless = true, + .indirect = true, + }, + [CAT5_BINDLESS_A1_IMM] = { + .bindless = true, + .use_a1 = true, + }, + [CAT5_BINDLESS_A1_UNIFORM] = { + .bindless = true, + .indirect = true, + .uniform = true, + .use_a1 = true, + }, + [CAT5_BINDLESS_A1_NONUNIFORM] = { + .bindless = true, + .indirect = true, + .use_a1 = true, + }, + }; + + instr_cat5_t *cat5 = &instr->cat5; + int i; + + bool desc_indirect = + cat5->is_s2en_bindless && + desc_features[cat5->s2en_bindless.desc_mode].indirect; + bool bindless = + cat5->is_s2en_bindless && + desc_features[cat5->s2en_bindless.desc_mode].bindless; + bool use_a1 = + cat5->is_s2en_bindless && + desc_features[cat5->s2en_bindless.desc_mode].use_a1; + bool uniform = + cat5->is_s2en_bindless && + desc_features[cat5->s2en_bindless.desc_mode].uniform; + + if (cat5->is_3d) fprintf(ctx->out, ".3d"); + if (cat5->is_a) fprintf(ctx->out, ".a"); + if (cat5->is_o) fprintf(ctx->out, ".o"); + if (cat5->is_p) fprintf(ctx->out, ".p"); + if (cat5->is_s) fprintf(ctx->out, ".s"); + if (desc_indirect) fprintf(ctx->out, ".s2en"); + if (uniform) fprintf(ctx->out, ".uniform"); + + if (bindless) { + unsigned base = (cat5->s2en_bindless.base_hi << 1) | cat5->base_lo; + fprintf(ctx->out, ".base%d", base); + } + + fprintf(ctx->out, " "); + + switch (_OPC(5, cat5->opc)) { + case OPC_DSXPP_1: + case OPC_DSYPP_1: + break; + default: + fprintf(ctx->out, "(%s)", type[cat5->type]); + break; + } + + fprintf(ctx->out, "("); + for (i = 0; i < 4; i++) + if (cat5->wrmask & (1 << i)) + fprintf(ctx->out, "%c", "xyzw"[i]); + fprintf(ctx->out, ")"); + + print_reg_dst(ctx, (reg_t)(cat5->dst), type_size(cat5->type) == 32, false); + + if (info[cat5->opc].src1) { + fprintf(ctx->out, ", "); + struct reginfo src = { .reg = (reg_t)(cat5->src1), .full = cat5->full }; + print_src(ctx, &src); + } + + if (cat5->is_o || info[cat5->opc].src2) { + fprintf(ctx->out, ", "); + struct reginfo src = { .reg = (reg_t)(cat5->src2), .full = cat5->full }; + print_src(ctx, &src); + } + if (cat5->is_s2en_bindless) { + if (!desc_indirect) { + if (info[cat5->opc].samp) { + if (use_a1) + fprintf(ctx->out, ", 
s#%d", cat5->s2en_bindless.src3); + else + fprintf(ctx->out, ", s#%d", cat5->s2en_bindless.src3 & 0xf); + } + + if (info[cat5->opc].tex && !use_a1) { + fprintf(ctx->out, ", t#%d", cat5->s2en_bindless.src3 >> 4); + } + } + } else { + if (info[cat5->opc].samp) + fprintf(ctx->out, ", s#%d", cat5->norm.samp); + if (info[cat5->opc].tex) + fprintf(ctx->out, ", t#%d", cat5->norm.tex); + } + + if (desc_indirect) { + fprintf(ctx->out, ", "); + struct reginfo src = { .reg = (reg_t)(cat5->s2en_bindless.src3), .full = bindless }; + print_src(ctx, &src); + } + + if (use_a1) + fprintf(ctx->out, ", a1.x"); + + if (debug & PRINT_VERBOSE) { + if (cat5->is_s2en_bindless) { + if ((debug & PRINT_VERBOSE) && cat5->s2en_bindless.dummy1) + fprintf(ctx->out, "\t{5: %x}", cat5->s2en_bindless.dummy1); + } else { + if ((debug & PRINT_VERBOSE) && cat5->norm.dummy1) + fprintf(ctx->out, "\t{5: %x}", cat5->norm.dummy1); + } + } +} + +static void print_instr_cat6_a3xx(struct disasm_ctx *ctx, instr_t *instr) +{ + instr_cat6_t *cat6 = &instr->cat6; + char sd = 0, ss = 0; /* dst/src address space */ + bool nodst = false; + struct reginfo dst, src1, src2; + int src1off = 0, dstoff = 0; + + memset(&dst, 0, sizeof(dst)); + memset(&src1, 0, sizeof(src1)); + memset(&src2, 0, sizeof(src2)); + + switch (_OPC(6, cat6->opc)) { + case OPC_RESINFO: + case OPC_RESFMT: + dst.full = type_size(cat6->type) == 32; + src1.full = type_size(cat6->type) == 32; + src2.full = type_size(cat6->type) == 32; + break; + case OPC_L2G: + case OPC_G2L: + dst.full = true; + src1.full = true; + src2.full = true; + break; + case OPC_STG: + case OPC_STL: + case OPC_STP: + case OPC_STLW: + case OPC_STIB: + dst.full = type_size(cat6->type) == 32; + src1.full = type_size(cat6->type) == 32; + src2.full = type_size(cat6->type) == 32; + break; + default: + dst.full = type_size(cat6->type) == 32; + src1.full = true; + src2.full = true; + break; + } + + switch (_OPC(6, cat6->opc)) { + case OPC_PREFETCH: + break; + case OPC_RESINFO: + fprintf(ctx->out, ".%dd", cat6->ldgb.d + 1); + break; + case OPC_LDGB: + fprintf(ctx->out, ".%s", cat6->ldgb.typed ? "typed" : "untyped"); + fprintf(ctx->out, ".%dd", cat6->ldgb.d + 1); + fprintf(ctx->out, ".%s", type[cat6->type]); + fprintf(ctx->out, ".%d", cat6->ldgb.type_size + 1); + break; + case OPC_STGB: + case OPC_STIB: + fprintf(ctx->out, ".%s", cat6->stgb.typed ? "typed" : "untyped"); + fprintf(ctx->out, ".%dd", cat6->stgb.d + 1); + fprintf(ctx->out, ".%s", type[cat6->type]); + fprintf(ctx->out, ".%d", cat6->stgb.type_size + 1); + break; + case OPC_ATOMIC_ADD: + case OPC_ATOMIC_SUB: + case OPC_ATOMIC_XCHG: + case OPC_ATOMIC_INC: + case OPC_ATOMIC_DEC: + case OPC_ATOMIC_CMPXCHG: + case OPC_ATOMIC_MIN: + case OPC_ATOMIC_MAX: + case OPC_ATOMIC_AND: + case OPC_ATOMIC_OR: + case OPC_ATOMIC_XOR: + ss = cat6->g ? 'g' : 'l'; + fprintf(ctx->out, ".%s", cat6->ldgb.typed ? 
"typed" : "untyped"); + fprintf(ctx->out, ".%dd", cat6->ldgb.d + 1); + fprintf(ctx->out, ".%s", type[cat6->type]); + fprintf(ctx->out, ".%d", cat6->ldgb.type_size + 1); + fprintf(ctx->out, ".%c", ss); + break; + default: + dst.im = cat6->g && !cat6->dst_off; + fprintf(ctx->out, ".%s", type[cat6->type]); + break; + } + fprintf(ctx->out, " "); + + switch (_OPC(6, cat6->opc)) { + case OPC_STG: + sd = 'g'; + break; + case OPC_STP: + sd = 'p'; + break; + case OPC_STL: + case OPC_STLW: + sd = 'l'; + break; + + case OPC_LDG: + case OPC_LDC: + ss = 'g'; + break; + case OPC_LDP: + ss = 'p'; + break; + case OPC_LDL: + case OPC_LDLW: + case OPC_LDLV: + ss = 'l'; + break; + + case OPC_L2G: + ss = 'l'; + sd = 'g'; + break; + + case OPC_G2L: + ss = 'g'; + sd = 'l'; + break; + + case OPC_PREFETCH: + ss = 'g'; + nodst = true; + break; + } + + if ((_OPC(6, cat6->opc) == OPC_STGB) || (_OPC(6, cat6->opc) == OPC_STIB)) { + struct reginfo src3; + + memset(&src3, 0, sizeof(src3)); + + src1.reg = (reg_t)(cat6->stgb.src1); + src2.reg = (reg_t)(cat6->stgb.src2); + src2.im = cat6->stgb.src2_im; + src3.reg = (reg_t)(cat6->stgb.src3); + src3.im = cat6->stgb.src3_im; + src3.full = true; + + fprintf(ctx->out, "g[%u], ", cat6->stgb.dst_ssbo); + print_src(ctx, &src1); + fprintf(ctx->out, ", "); + print_src(ctx, &src2); + fprintf(ctx->out, ", "); + print_src(ctx, &src3); + + if (debug & PRINT_VERBOSE) + fprintf(ctx->out, " (pad0=%x, pad3=%x)", cat6->stgb.pad0, cat6->stgb.pad3); + + return; + } + + if (is_atomic(_OPC(6, cat6->opc))) { + + src1.reg = (reg_t)(cat6->ldgb.src1); + src1.im = cat6->ldgb.src1_im; + src2.reg = (reg_t)(cat6->ldgb.src2); + src2.im = cat6->ldgb.src2_im; + dst.reg = (reg_t)(cat6->ldgb.dst); + + print_src(ctx, &dst); + fprintf(ctx->out, ", "); + if (ss == 'g') { + struct reginfo src3; + memset(&src3, 0, sizeof(src3)); + + src3.reg = (reg_t)(cat6->ldgb.src3); + src3.full = true; + + /* For images, the ".typed" variant is used and src2 is + * the ivecN coordinates, ie ivec2 for 2d. + * + * For SSBOs, the ".untyped" variant is used and src2 is + * a simple dword offset.. src3 appears to be + * uvec2(offset * 4, 0). Not sure the point of that. + */ + + fprintf(ctx->out, "g[%u], ", cat6->ldgb.src_ssbo); + print_src(ctx, &src1); /* value */ + fprintf(ctx->out, ", "); + print_src(ctx, &src2); /* offset/coords */ + fprintf(ctx->out, ", "); + print_src(ctx, &src3); /* 64b byte offset.. 
*/ + + if (debug & PRINT_VERBOSE) { + fprintf(ctx->out, " (pad0=%x, pad3=%x, mustbe0=%x)", cat6->ldgb.pad0, + cat6->ldgb.pad3, cat6->ldgb.mustbe0); + } + } else { /* ss == 'l' */ + fprintf(ctx->out, "l["); + print_src(ctx, &src1); /* simple byte offset */ + fprintf(ctx->out, "], "); + print_src(ctx, &src2); /* value */ + + if (debug & PRINT_VERBOSE) { + fprintf(ctx->out, " (src3=%x, pad0=%x, pad3=%x, mustbe0=%x)", + cat6->ldgb.src3, cat6->ldgb.pad0, + cat6->ldgb.pad3, cat6->ldgb.mustbe0); + } + } + + return; + } else if (_OPC(6, cat6->opc) == OPC_RESINFO) { + dst.reg = (reg_t)(cat6->ldgb.dst); + + print_src(ctx, &dst); + fprintf(ctx->out, ", "); + fprintf(ctx->out, "g[%u]", cat6->ldgb.src_ssbo); + + return; + } else if (_OPC(6, cat6->opc) == OPC_LDGB) { + + src1.reg = (reg_t)(cat6->ldgb.src1); + src1.im = cat6->ldgb.src1_im; + src2.reg = (reg_t)(cat6->ldgb.src2); + src2.im = cat6->ldgb.src2_im; + dst.reg = (reg_t)(cat6->ldgb.dst); + + print_src(ctx, &dst); + fprintf(ctx->out, ", "); + fprintf(ctx->out, "g[%u], ", cat6->ldgb.src_ssbo); + print_src(ctx, &src1); + fprintf(ctx->out, ", "); + print_src(ctx, &src2); + + if (debug & PRINT_VERBOSE) + fprintf(ctx->out, " (pad0=%x, pad3=%x, mustbe0=%x)", cat6->ldgb.pad0, cat6->ldgb.pad3, cat6->ldgb.mustbe0); + + return; + } else if (_OPC(6, cat6->opc) == OPC_LDG && cat6->a.src1_im && cat6->a.src2_im) { + struct reginfo src3; + + memset(&src3, 0, sizeof(src3)); + src1.reg = (reg_t)(cat6->a.src1); + src2.reg = (reg_t)(cat6->a.src2); + src2.im = cat6->a.src2_im; + src3.reg = (reg_t)(cat6->a.off); + src3.full = true; + dst.reg = (reg_t)(cat6->d.dst); + + print_src(ctx, &dst); + fprintf(ctx->out, ", g["); + print_src(ctx, &src1); + fprintf(ctx->out, "+"); + print_src(ctx, &src3); + fprintf(ctx->out, "], "); + print_src(ctx, &src2); + + return; + } + if (cat6->dst_off) { + dst.reg = (reg_t)(cat6->c.dst); + dstoff = cat6->c.off; + } else { + dst.reg = (reg_t)(cat6->d.dst); + } + + if (cat6->src_off) { + src1.reg = (reg_t)(cat6->a.src1); + src1.im = cat6->a.src1_im; + src2.reg = (reg_t)(cat6->a.src2); + src2.im = cat6->a.src2_im; + src1off = cat6->a.off; + } else { + src1.reg = (reg_t)(cat6->b.src1); + src1.im = cat6->b.src1_im; + src2.reg = (reg_t)(cat6->b.src2); + src2.im = cat6->b.src2_im; + } + + if (!nodst) { + if (sd) + fprintf(ctx->out, "%c[", sd); + /* note: dst might actually be a src (ie. 
address to store to) */ + print_src(ctx, &dst); + if (cat6->dst_off && cat6->g) { + struct reginfo dstoff_reg = {0}; + dstoff_reg.reg = (reg_t) cat6->c.off; + dstoff_reg.full = true; + fprintf(ctx->out, "+"); + print_src(ctx, &dstoff_reg); + } else if (dstoff) + fprintf(ctx->out, "%+d", dstoff); + if (sd) + fprintf(ctx->out, "]"); + fprintf(ctx->out, ", "); + } + + if (ss) + fprintf(ctx->out, "%c[", ss); + + /* can have a larger than normal immed, so hack: */ + if (src1.im) { + fprintf(ctx->out, "%u", src1.reg.dummy13); + } else { + print_src(ctx, &src1); + } + + if (cat6->src_off && cat6->g) + print_src(ctx, &src2); + else if (src1off) + fprintf(ctx->out, "%+d", src1off); + if (ss) + fprintf(ctx->out, "]"); + + switch (_OPC(6, cat6->opc)) { + case OPC_RESINFO: + case OPC_RESFMT: + break; + default: + fprintf(ctx->out, ", "); + print_src(ctx, &src2); + break; + } +} + +static void print_instr_cat6_a6xx(struct disasm_ctx *ctx, instr_t *instr) +{ + instr_cat6_a6xx_t *cat6 = &instr->cat6_a6xx; + struct reginfo src1, src2, ssbo; + bool uses_type = _OPC(6, cat6->opc) != OPC_LDC; + + static const struct { + bool indirect; + bool bindless; + const char *name; + } desc_features[8] = { + [CAT6_IMM] = { + .name = "imm" + }, + [CAT6_UNIFORM] = { + .indirect = true, + .name = "uniform" + }, + [CAT6_NONUNIFORM] = { + .indirect = true, + .name = "nonuniform" + }, + [CAT6_BINDLESS_IMM] = { + .bindless = true, + .name = "imm" + }, + [CAT6_BINDLESS_UNIFORM] = { + .bindless = true, + .indirect = true, + .name = "uniform" + }, + [CAT6_BINDLESS_NONUNIFORM] = { + .bindless = true, + .indirect = true, + .name = "nonuniform" + }, + }; + + bool indirect_ssbo = desc_features[cat6->desc_mode].indirect; + bool bindless = desc_features[cat6->desc_mode].bindless; + bool type_full = cat6->type != TYPE_U16; + + + memset(&src1, 0, sizeof(src1)); + memset(&src2, 0, sizeof(src2)); + memset(&ssbo, 0, sizeof(ssbo)); + + if (uses_type) { + fprintf(ctx->out, ".%s", cat6->typed ? 
"typed" : "untyped"); + fprintf(ctx->out, ".%dd", cat6->d + 1); + fprintf(ctx->out, ".%s", type[cat6->type]); + } else { + fprintf(ctx->out, ".offset%d", cat6->d); + } + fprintf(ctx->out, ".%u", cat6->type_size + 1); + + fprintf(ctx->out, ".%s", desc_features[cat6->desc_mode].name); + if (bindless) + fprintf(ctx->out, ".base%d", cat6->base); + fprintf(ctx->out, " "); + + src2.reg = (reg_t)(cat6->src2); + src2.full = type_full; + print_src(ctx, &src2); + fprintf(ctx->out, ", "); + + src1.reg = (reg_t)(cat6->src1); + src1.full = true; // XXX + print_src(ctx, &src1); + fprintf(ctx->out, ", "); + ssbo.reg = (reg_t)(cat6->ssbo); + ssbo.im = !indirect_ssbo; + ssbo.full = true; + print_src(ctx, &ssbo); + + if (debug & PRINT_VERBOSE) { + fprintf(ctx->out, " (pad1=%x, pad2=%x, pad3=%x, pad4=%x, pad5=%x)", + cat6->pad1, cat6->pad2, cat6->pad3, cat6->pad4, cat6->pad5); + } +} + +static void print_instr_cat6(struct disasm_ctx *ctx, instr_t *instr) +{ + if (!is_cat6_legacy(instr, ctx->gpu_id)) { + print_instr_cat6_a6xx(ctx, instr); + if (debug & PRINT_VERBOSE) + fprintf(ctx->out, " NEW"); + } else { + print_instr_cat6_a3xx(ctx, instr); + if (debug & PRINT_VERBOSE) + fprintf(ctx->out, " LEGACY"); + } +} +static void print_instr_cat7(struct disasm_ctx *ctx, instr_t *instr) +{ + instr_cat7_t *cat7 = &instr->cat7; + + if (cat7->g) + fprintf(ctx->out, ".g"); + if (cat7->l) + fprintf(ctx->out, ".l"); + + if (_OPC(7, cat7->opc) == OPC_FENCE) { + if (cat7->r) + fprintf(ctx->out, ".r"); + if (cat7->w) + fprintf(ctx->out, ".w"); + } +} + +/* size of largest OPC field of all the instruction categories: */ +#define NOPC_BITS 6 + +static const struct opc_info { + uint16_t cat; + uint16_t opc; + const char *name; + void (*print)(struct disasm_ctx *ctx, instr_t *instr); +} opcs[1 << (3+NOPC_BITS)] = { +#define OPC(cat, opc, name) [(opc)] = { (cat), (opc), #name, print_instr_cat##cat } + /* category 0: */ + OPC(0, OPC_NOP, nop), + OPC(0, OPC_B, b), + OPC(0, OPC_JUMP, jump), + OPC(0, OPC_CALL, call), + OPC(0, OPC_RET, ret), + OPC(0, OPC_KILL, kill), + OPC(0, OPC_END, end), + OPC(0, OPC_EMIT, emit), + OPC(0, OPC_CUT, cut), + OPC(0, OPC_CHMASK, chmask), + OPC(0, OPC_CHSH, chsh), + OPC(0, OPC_FLOW_REV, flow_rev), + OPC(0, OPC_PREDT, predt), + OPC(0, OPC_PREDF, predf), + OPC(0, OPC_PREDE, prede), + OPC(0, OPC_BKT, bkt), + OPC(0, OPC_STKS, stks), + OPC(0, OPC_STKR, stkr), + OPC(0, OPC_XSET, xset), + OPC(0, OPC_XCLR, xclr), + OPC(0, OPC_GETONE, getone), + OPC(0, OPC_DBG, dbg), + OPC(0, OPC_SHPS, shps), + OPC(0, OPC_SHPE, shpe), + + /* category 1: */ + OPC(1, OPC_MOV, ), + + /* category 2: */ + OPC(2, OPC_ADD_F, add.f), + OPC(2, OPC_MIN_F, min.f), + OPC(2, OPC_MAX_F, max.f), + OPC(2, OPC_MUL_F, mul.f), + OPC(2, OPC_SIGN_F, sign.f), + OPC(2, OPC_CMPS_F, cmps.f), + OPC(2, OPC_ABSNEG_F, absneg.f), + OPC(2, OPC_CMPV_F, cmpv.f), + OPC(2, OPC_FLOOR_F, floor.f), + OPC(2, OPC_CEIL_F, ceil.f), + OPC(2, OPC_RNDNE_F, rndne.f), + OPC(2, OPC_RNDAZ_F, rndaz.f), + OPC(2, OPC_TRUNC_F, trunc.f), + OPC(2, OPC_ADD_U, add.u), + OPC(2, OPC_ADD_S, add.s), + OPC(2, OPC_SUB_U, sub.u), + OPC(2, OPC_SUB_S, sub.s), + OPC(2, OPC_CMPS_U, cmps.u), + OPC(2, OPC_CMPS_S, cmps.s), + OPC(2, OPC_MIN_U, min.u), + OPC(2, OPC_MIN_S, min.s), + OPC(2, OPC_MAX_U, max.u), + OPC(2, OPC_MAX_S, max.s), + OPC(2, OPC_ABSNEG_S, absneg.s), + OPC(2, OPC_AND_B, and.b), + OPC(2, OPC_OR_B, or.b), + OPC(2, OPC_NOT_B, not.b), + OPC(2, OPC_XOR_B, xor.b), + OPC(2, OPC_CMPV_U, cmpv.u), + OPC(2, OPC_CMPV_S, cmpv.s), + OPC(2, OPC_MUL_U24, mul.u24), + OPC(2, OPC_MUL_S24, mul.s24), + 
OPC(2, OPC_MULL_U, mull.u), + OPC(2, OPC_BFREV_B, bfrev.b), + OPC(2, OPC_CLZ_S, clz.s), + OPC(2, OPC_CLZ_B, clz.b), + OPC(2, OPC_SHL_B, shl.b), + OPC(2, OPC_SHR_B, shr.b), + OPC(2, OPC_ASHR_B, ashr.b), + OPC(2, OPC_BARY_F, bary.f), + OPC(2, OPC_MGEN_B, mgen.b), + OPC(2, OPC_GETBIT_B, getbit.b), + OPC(2, OPC_SETRM, setrm), + OPC(2, OPC_CBITS_B, cbits.b), + OPC(2, OPC_SHB, shb), + OPC(2, OPC_MSAD, msad), + + /* category 3: */ + OPC(3, OPC_MAD_U16, mad.u16), + OPC(3, OPC_MADSH_U16, madsh.u16), + OPC(3, OPC_MAD_S16, mad.s16), + OPC(3, OPC_MADSH_M16, madsh.m16), + OPC(3, OPC_MAD_U24, mad.u24), + OPC(3, OPC_MAD_S24, mad.s24), + OPC(3, OPC_MAD_F16, mad.f16), + OPC(3, OPC_MAD_F32, mad.f32), + OPC(3, OPC_SEL_B16, sel.b16), + OPC(3, OPC_SEL_B32, sel.b32), + OPC(3, OPC_SEL_S16, sel.s16), + OPC(3, OPC_SEL_S32, sel.s32), + OPC(3, OPC_SEL_F16, sel.f16), + OPC(3, OPC_SEL_F32, sel.f32), + OPC(3, OPC_SAD_S16, sad.s16), + OPC(3, OPC_SAD_S32, sad.s32), + + /* category 4: */ + OPC(4, OPC_RCP, rcp), + OPC(4, OPC_RSQ, rsq), + OPC(4, OPC_LOG2, log2), + OPC(4, OPC_EXP2, exp2), + OPC(4, OPC_SIN, sin), + OPC(4, OPC_COS, cos), + OPC(4, OPC_SQRT, sqrt), + OPC(4, OPC_HRSQ, hrsq), + OPC(4, OPC_HLOG2, hlog2), + OPC(4, OPC_HEXP2, hexp2), + + /* category 5: */ + OPC(5, OPC_ISAM, isam), + OPC(5, OPC_ISAML, isaml), + OPC(5, OPC_ISAMM, isamm), + OPC(5, OPC_SAM, sam), + OPC(5, OPC_SAMB, samb), + OPC(5, OPC_SAML, saml), + OPC(5, OPC_SAMGQ, samgq), + OPC(5, OPC_GETLOD, getlod), + OPC(5, OPC_CONV, conv), + OPC(5, OPC_CONVM, convm), + OPC(5, OPC_GETSIZE, getsize), + OPC(5, OPC_GETBUF, getbuf), + OPC(5, OPC_GETPOS, getpos), + OPC(5, OPC_GETINFO, getinfo), + OPC(5, OPC_DSX, dsx), + OPC(5, OPC_DSY, dsy), + OPC(5, OPC_GATHER4R, gather4r), + OPC(5, OPC_GATHER4G, gather4g), + OPC(5, OPC_GATHER4B, gather4b), + OPC(5, OPC_GATHER4A, gather4a), + OPC(5, OPC_SAMGP0, samgp0), + OPC(5, OPC_SAMGP1, samgp1), + OPC(5, OPC_SAMGP2, samgp2), + OPC(5, OPC_SAMGP3, samgp3), + OPC(5, OPC_DSXPP_1, dsxpp.1), + OPC(5, OPC_DSYPP_1, dsypp.1), + OPC(5, OPC_RGETPOS, rgetpos), + OPC(5, OPC_RGETINFO, rgetinfo), + + + /* category 6: */ + OPC(6, OPC_LDG, ldg), + OPC(6, OPC_LDL, ldl), + OPC(6, OPC_LDP, ldp), + OPC(6, OPC_STG, stg), + OPC(6, OPC_STL, stl), + OPC(6, OPC_STP, stp), + OPC(6, OPC_LDIB, ldib), + OPC(6, OPC_G2L, g2l), + OPC(6, OPC_L2G, l2g), + OPC(6, OPC_PREFETCH, prefetch), + OPC(6, OPC_LDLW, ldlw), + OPC(6, OPC_STLW, stlw), + OPC(6, OPC_RESFMT, resfmt), + OPC(6, OPC_RESINFO, resinfo), + OPC(6, OPC_ATOMIC_ADD, atomic.add), + OPC(6, OPC_ATOMIC_SUB, atomic.sub), + OPC(6, OPC_ATOMIC_XCHG, atomic.xchg), + OPC(6, OPC_ATOMIC_INC, atomic.inc), + OPC(6, OPC_ATOMIC_DEC, atomic.dec), + OPC(6, OPC_ATOMIC_CMPXCHG, atomic.cmpxchg), + OPC(6, OPC_ATOMIC_MIN, atomic.min), + OPC(6, OPC_ATOMIC_MAX, atomic.max), + OPC(6, OPC_ATOMIC_AND, atomic.and), + OPC(6, OPC_ATOMIC_OR, atomic.or), + OPC(6, OPC_ATOMIC_XOR, atomic.xor), + OPC(6, OPC_LDGB, ldgb), + OPC(6, OPC_STGB, stgb), + OPC(6, OPC_STIB, stib), + OPC(6, OPC_LDC, ldc), + OPC(6, OPC_LDLV, ldlv), + + OPC(7, OPC_BAR, bar), + OPC(7, OPC_FENCE, fence), + +#undef OPC +}; + +#define GETINFO(instr) (&(opcs[((instr)->opc_cat << NOPC_BITS) | instr_opc(instr, ctx->gpu_id)])) + +// XXX hack.. 
probably should move this table somewhere common: +#include "ir3.h" +const char *ir3_instr_name(struct ir3_instruction *instr) +{ + if (opc_cat(instr->opc) == -1) return "??meta??"; + return opcs[instr->opc].name; +} + +static void print_single_instr(struct disasm_ctx *ctx, instr_t *instr) +{ + const char *name = GETINFO(instr)->name; + uint32_t opc = instr_opc(instr, ctx->gpu_id); + + if (name) { + fprintf(ctx->out, "%s", name); + GETINFO(instr)->print(ctx, instr); + } else { + fprintf(ctx->out, "unknown(%d,%d)", instr->opc_cat, opc); + + switch (instr->opc_cat) { + case 0: print_instr_cat0(ctx, instr); break; + case 1: print_instr_cat1(ctx, instr); break; + case 2: print_instr_cat2(ctx, instr); break; + case 3: print_instr_cat3(ctx, instr); break; + case 4: print_instr_cat4(ctx, instr); break; + case 5: print_instr_cat5(ctx, instr); break; + case 6: print_instr_cat6(ctx, instr); break; + case 7: print_instr_cat7(ctx, instr); break; + } + } +} + +static bool print_instr(struct disasm_ctx *ctx, uint32_t *dwords, int n) +{ + instr_t *instr = (instr_t *)dwords; + uint32_t opc = instr_opc(instr, ctx->gpu_id); + unsigned nop = 0; + unsigned cycles = ctx->instructions; + + if (debug & PRINT_VERBOSE) { + fprintf(ctx->out, "%s%04d:%04d[%08xx_%08xx] ", levels[ctx->level], + n, cycles++, dwords[1], dwords[0]); + } + + /* NOTE: order flags are printed is a bit fugly.. but for now I + * try to match the order in llvm-a3xx disassembler for easy + * diff'ing.. + */ + + ctx->repeat = instr_repeat(instr); + ctx->instructions += 1 + ctx->repeat; + + if (instr->sync) { + fprintf(ctx->out, "(sy)"); + } + if (instr->ss && ((instr->opc_cat <= 4) || (instr->opc_cat == 7))) { + fprintf(ctx->out, "(ss)"); + } + if (instr->jmp_tgt) + fprintf(ctx->out, "(jp)"); + if ((instr->opc_cat == 0) && instr->cat0.eq) + fprintf(ctx->out, "(eq)"); + if (instr_sat(instr)) + fprintf(ctx->out, "(sat)"); + if (ctx->repeat) + fprintf(ctx->out, "(rpt%d)", ctx->repeat); + else if ((instr->opc_cat == 2) && (instr->cat2.src1_r || instr->cat2.src2_r)) + nop = (instr->cat2.src2_r * 2) + instr->cat2.src1_r; + else if ((instr->opc_cat == 3) && (instr->cat3.src1_r || instr->cat3.src2_r)) + nop = (instr->cat3.src2_r * 2) + instr->cat3.src1_r; + ctx->instructions += nop; + if (nop) + fprintf(ctx->out, "(nop%d) ", nop); + + if (instr->ul && ((2 <= instr->opc_cat) && (instr->opc_cat <= 4))) + fprintf(ctx->out, "(ul)"); + + print_single_instr(ctx, instr); + fprintf(ctx->out, "\n"); + + if ((instr->opc_cat <= 4) && (debug & EXPAND_REPEAT)) { + int i; + for (i = 0; i < nop; i++) { + if (debug & PRINT_VERBOSE) { + fprintf(ctx->out, "%s%04d:%04d[ ] ", + levels[ctx->level], n, cycles++); + } + fprintf(ctx->out, "nop\n"); + } + for (i = 0; i < ctx->repeat; i++) { + ctx->repeatidx = i + 1; + if (debug & PRINT_VERBOSE) { + fprintf(ctx->out, "%s%04d:%04d[ ] ", + levels[ctx->level], n, cycles++); + } + print_single_instr(ctx, instr); + fprintf(ctx->out, "\n"); + } + ctx->repeatidx = 0; + } + + return (instr->opc_cat == 0) && (opc == OPC_END); +} + +int disasm_a3xx(uint32_t *dwords, int sizedwords, int level, FILE *out, unsigned gpu_id) +{ + struct disasm_ctx ctx; + int i; + int nop_count = 0; + + //assert((sizedwords % 2) == 0); + + memset(&ctx, 0, sizeof(ctx)); + ctx.out = out; + ctx.level = level; + ctx.gpu_id = gpu_id; + + for (i = 0; i < sizedwords; i += 2) { + print_instr(&ctx, &dwords[i], i/2); + if (dwords[i] == 0 && dwords[i + 1] == 0) + nop_count++; + else + nop_count = 0; + if (nop_count > 3) + break; + } + + return 0; +} + +int main(int argc, 
char *argv[]) {
+  uint32_t buf[0x10000];
+  FILE *f = fopen(argv[1], "rb");
+  if (argc > 2) {
+    int seek = atoi(argv[2]);
+    printf("skip %d\n", seek);
+    fread(buf, 1, seek, f);
+  }
+  int len = fread(buf, 1, sizeof(buf), f);
+  fclose(f);
+
+  disasm_a3xx(buf, len/4, 0, stdout, 0);
+}
+
diff --git a/selfdrive/modeld/thneed/debug/decompiler/instr-a3xx.h b/selfdrive/modeld/thneed/debug/decompiler/instr-a3xx.h
new file mode 100644
index 0000000000..e4f548d639
--- /dev/null
+++ b/selfdrive/modeld/thneed/debug/decompiler/instr-a3xx.h
@@ -0,0 +1,1119 @@
+/*
+ * Copyright (c) 2013 Rob Clark <robdclark@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef INSTR_A3XX_H_
+#define INSTR_A3XX_H_
+
+#define PACKED __attribute__((__packed__))
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <assert.h>
+
+/* size of largest OPC field of all the instruction categories: */
+#define NOPC_BITS 6
+
+#define _OPC(cat, opc) (((cat) << NOPC_BITS) | opc)
+
+typedef enum {
+	/* category 0: */
+	OPC_NOP             = _OPC(0, 0),
+	OPC_B               = _OPC(0, 1),
+	OPC_JUMP            = _OPC(0, 2),
+	OPC_CALL            = _OPC(0, 3),
+	OPC_RET             = _OPC(0, 4),
+	OPC_KILL            = _OPC(0, 5),
+	OPC_END             = _OPC(0, 6),
+	OPC_EMIT            = _OPC(0, 7),
+	OPC_CUT             = _OPC(0, 8),
+	OPC_CHMASK          = _OPC(0, 9),
+	OPC_CHSH            = _OPC(0, 10),
+	OPC_FLOW_REV        = _OPC(0, 11),
+
+	OPC_BKT             = _OPC(0, 16),
+	OPC_STKS            = _OPC(0, 17),
+	OPC_STKR            = _OPC(0, 18),
+	OPC_XSET            = _OPC(0, 19),
+	OPC_XCLR            = _OPC(0, 20),
+	OPC_GETONE          = _OPC(0, 21),
+	OPC_DBG             = _OPC(0, 22),
+	OPC_SHPS            = _OPC(0, 23),   /* shader prologue start */
+	OPC_SHPE            = _OPC(0, 24),   /* shader prologue end */
+
+	OPC_PREDT           = _OPC(0, 29),   /* predicated true */
+	OPC_PREDF           = _OPC(0, 30),   /* predicated false */
+	OPC_PREDE           = _OPC(0, 31),   /* predicated end */
+
+	/* category 1: */
+	OPC_MOV             = _OPC(1, 0),
+
+	/* category 2: */
+	OPC_ADD_F           = _OPC(2, 0),
+	OPC_MIN_F           = _OPC(2, 1),
+	OPC_MAX_F           = _OPC(2, 2),
+	OPC_MUL_F           = _OPC(2, 3),
+	OPC_SIGN_F          = _OPC(2, 4),
+	OPC_CMPS_F          = _OPC(2, 5),
+	OPC_ABSNEG_F        = _OPC(2, 6),
+	OPC_CMPV_F          = _OPC(2, 7),
+	/* 8 - invalid */
+	OPC_FLOOR_F         = _OPC(2, 9),
+	OPC_CEIL_F          = _OPC(2, 10),
+	OPC_RNDNE_F         = _OPC(2, 11),
+	OPC_RNDAZ_F         = _OPC(2, 12),
+	OPC_TRUNC_F         = _OPC(2, 13),
+	/* 14-15 - invalid */
+	OPC_ADD_U           = _OPC(2, 16),
+	OPC_ADD_S           = _OPC(2, 17),
+	OPC_SUB_U           = _OPC(2, 18),
+	OPC_SUB_S           = _OPC(2, 19),
+	OPC_CMPS_U          = _OPC(2, 20),
+	OPC_CMPS_S          = _OPC(2, 21),
+	OPC_MIN_U           = _OPC(2, 22),
+	OPC_MIN_S           = _OPC(2, 23),
+	OPC_MAX_U           = _OPC(2, 24),
+	OPC_MAX_S           = _OPC(2, 25),
+	OPC_ABSNEG_S        = _OPC(2, 26),
+
/* 27 - invalid */ + OPC_AND_B = _OPC(2, 28), + OPC_OR_B = _OPC(2, 29), + OPC_NOT_B = _OPC(2, 30), + OPC_XOR_B = _OPC(2, 31), + /* 32 - invalid */ + OPC_CMPV_U = _OPC(2, 33), + OPC_CMPV_S = _OPC(2, 34), + /* 35-47 - invalid */ + OPC_MUL_U24 = _OPC(2, 48), /* 24b mul into 32b result */ + OPC_MUL_S24 = _OPC(2, 49), /* 24b mul into 32b result with sign extension */ + OPC_MULL_U = _OPC(2, 50), + OPC_BFREV_B = _OPC(2, 51), + OPC_CLZ_S = _OPC(2, 52), + OPC_CLZ_B = _OPC(2, 53), + OPC_SHL_B = _OPC(2, 54), + OPC_SHR_B = _OPC(2, 55), + OPC_ASHR_B = _OPC(2, 56), + OPC_BARY_F = _OPC(2, 57), + OPC_MGEN_B = _OPC(2, 58), + OPC_GETBIT_B = _OPC(2, 59), + OPC_SETRM = _OPC(2, 60), + OPC_CBITS_B = _OPC(2, 61), + OPC_SHB = _OPC(2, 62), + OPC_MSAD = _OPC(2, 63), + + /* category 3: */ + OPC_MAD_U16 = _OPC(3, 0), + OPC_MADSH_U16 = _OPC(3, 1), + OPC_MAD_S16 = _OPC(3, 2), + OPC_MADSH_M16 = _OPC(3, 3), /* should this be .s16? */ + OPC_MAD_U24 = _OPC(3, 4), + OPC_MAD_S24 = _OPC(3, 5), + OPC_MAD_F16 = _OPC(3, 6), + OPC_MAD_F32 = _OPC(3, 7), + OPC_SEL_B16 = _OPC(3, 8), + OPC_SEL_B32 = _OPC(3, 9), + OPC_SEL_S16 = _OPC(3, 10), + OPC_SEL_S32 = _OPC(3, 11), + OPC_SEL_F16 = _OPC(3, 12), + OPC_SEL_F32 = _OPC(3, 13), + OPC_SAD_S16 = _OPC(3, 14), + OPC_SAD_S32 = _OPC(3, 15), + + /* category 4: */ + OPC_RCP = _OPC(4, 0), + OPC_RSQ = _OPC(4, 1), + OPC_LOG2 = _OPC(4, 2), + OPC_EXP2 = _OPC(4, 3), + OPC_SIN = _OPC(4, 4), + OPC_COS = _OPC(4, 5), + OPC_SQRT = _OPC(4, 6), + /* NOTE that these are 8+opc from their highp equivs, so it's possible + * that the high order bit in the opc field has been repurposed for + * half-precision use? But note that other ops (rcp/lsin/cos/sqrt) + * still use the same opc as highp + */ + OPC_HRSQ = _OPC(4, 9), + OPC_HLOG2 = _OPC(4, 10), + OPC_HEXP2 = _OPC(4, 11), + + /* category 5: */ + OPC_ISAM = _OPC(5, 0), + OPC_ISAML = _OPC(5, 1), + OPC_ISAMM = _OPC(5, 2), + OPC_SAM = _OPC(5, 3), + OPC_SAMB = _OPC(5, 4), + OPC_SAML = _OPC(5, 5), + OPC_SAMGQ = _OPC(5, 6), + OPC_GETLOD = _OPC(5, 7), + OPC_CONV = _OPC(5, 8), + OPC_CONVM = _OPC(5, 9), + OPC_GETSIZE = _OPC(5, 10), + OPC_GETBUF = _OPC(5, 11), + OPC_GETPOS = _OPC(5, 12), + OPC_GETINFO = _OPC(5, 13), + OPC_DSX = _OPC(5, 14), + OPC_DSY = _OPC(5, 15), + OPC_GATHER4R = _OPC(5, 16), + OPC_GATHER4G = _OPC(5, 17), + OPC_GATHER4B = _OPC(5, 18), + OPC_GATHER4A = _OPC(5, 19), + OPC_SAMGP0 = _OPC(5, 20), + OPC_SAMGP1 = _OPC(5, 21), + OPC_SAMGP2 = _OPC(5, 22), + OPC_SAMGP3 = _OPC(5, 23), + OPC_DSXPP_1 = _OPC(5, 24), + OPC_DSYPP_1 = _OPC(5, 25), + OPC_RGETPOS = _OPC(5, 26), + OPC_RGETINFO = _OPC(5, 27), + + /* category 6: */ + OPC_LDG = _OPC(6, 0), /* load-global */ + OPC_LDL = _OPC(6, 1), + OPC_LDP = _OPC(6, 2), + OPC_STG = _OPC(6, 3), /* store-global */ + OPC_STL = _OPC(6, 4), + OPC_STP = _OPC(6, 5), + OPC_LDIB = _OPC(6, 6), + OPC_G2L = _OPC(6, 7), + OPC_L2G = _OPC(6, 8), + OPC_PREFETCH = _OPC(6, 9), + OPC_LDLW = _OPC(6, 10), + OPC_STLW = _OPC(6, 11), + OPC_RESFMT = _OPC(6, 14), + OPC_RESINFO = _OPC(6, 15), + OPC_ATOMIC_ADD = _OPC(6, 16), + OPC_ATOMIC_SUB = _OPC(6, 17), + OPC_ATOMIC_XCHG = _OPC(6, 18), + OPC_ATOMIC_INC = _OPC(6, 19), + OPC_ATOMIC_DEC = _OPC(6, 20), + OPC_ATOMIC_CMPXCHG = _OPC(6, 21), + OPC_ATOMIC_MIN = _OPC(6, 22), + OPC_ATOMIC_MAX = _OPC(6, 23), + OPC_ATOMIC_AND = _OPC(6, 24), + OPC_ATOMIC_OR = _OPC(6, 25), + OPC_ATOMIC_XOR = _OPC(6, 26), + OPC_LDGB = _OPC(6, 27), + OPC_STGB = _OPC(6, 28), + OPC_STIB = _OPC(6, 29), + OPC_LDC = _OPC(6, 30), + OPC_LDLV = _OPC(6, 31), + + /* category 7: */ + OPC_BAR = _OPC(7, 0), + OPC_FENCE = _OPC(7, 1), + + /* meta 
instructions (category -1): */ + /* placeholder instr to mark shader inputs: */ + OPC_META_INPUT = _OPC(-1, 0), + /* The "collect" and "split" instructions are used for keeping + * track of instructions that write to multiple dst registers + * (split) like texture sample instructions, or read multiple + * consecutive scalar registers (collect) (bary.f, texture samp) + * + * A "split" extracts a scalar component from a vecN, and a + * "collect" gathers multiple scalar components into a vecN + */ + OPC_META_SPLIT = _OPC(-1, 2), + OPC_META_COLLECT = _OPC(-1, 3), + + /* placeholder for texture fetches that run before FS invocation + * starts: + */ + OPC_META_TEX_PREFETCH = _OPC(-1, 4), + +} opc_t; + +#define opc_cat(opc) ((int)((opc) >> NOPC_BITS)) +#define opc_op(opc) ((unsigned)((opc) & ((1 << NOPC_BITS) - 1))) + +typedef enum { + TYPE_F16 = 0, + TYPE_F32 = 1, + TYPE_U16 = 2, + TYPE_U32 = 3, + TYPE_S16 = 4, + TYPE_S32 = 5, + TYPE_U8 = 6, + TYPE_S8 = 7, // XXX I assume? +} type_t; + +static inline uint32_t type_size(type_t type) +{ + switch (type) { + case TYPE_F32: + case TYPE_U32: + case TYPE_S32: + return 32; + case TYPE_F16: + case TYPE_U16: + case TYPE_S16: + return 16; + case TYPE_U8: + case TYPE_S8: + return 8; + default: + assert(0); /* invalid type */ + return 0; + } +} + +static inline int type_float(type_t type) +{ + return (type == TYPE_F32) || (type == TYPE_F16); +} + +static inline int type_uint(type_t type) +{ + return (type == TYPE_U32) || (type == TYPE_U16) || (type == TYPE_U8); +} + +static inline int type_sint(type_t type) +{ + return (type == TYPE_S32) || (type == TYPE_S16) || (type == TYPE_S8); +} + +typedef union PACKED { + /* normal gpr or const src register: */ + struct PACKED { + uint32_t comp : 2; + uint32_t num : 10; + }; + /* for immediate val: */ + int32_t iim_val : 11; + /* to make compiler happy: */ + uint32_t dummy32; + uint32_t dummy10 : 10; + int32_t idummy10 : 10; + uint32_t dummy11 : 11; + uint32_t dummy12 : 12; + uint32_t dummy13 : 13; + uint32_t dummy8 : 8; + int32_t idummy13 : 13; + int32_t idummy8 : 8; +} reg_t; + +/* special registers: */ +#define REG_A0 61 /* address register */ +#define REG_P0 62 /* predicate register */ + +static inline int reg_special(reg_t reg) +{ + return (reg.num == REG_A0) || (reg.num == REG_P0); +} + +typedef enum { + BRANCH_PLAIN = 0, /* br */ + BRANCH_OR = 1, /* brao */ + BRANCH_AND = 2, /* braa */ + BRANCH_CONST = 3, /* brac */ + BRANCH_ANY = 4, /* bany */ + BRANCH_ALL = 5, /* ball */ + BRANCH_X = 6, /* brax ??? 
*/ +} brtype_t; + +typedef struct PACKED { + /* dword0: */ + union PACKED { + struct PACKED { + int16_t immed : 16; + uint32_t dummy1 : 16; + } a3xx; + struct PACKED { + int32_t immed : 20; + uint32_t dummy1 : 12; + } a4xx; + struct PACKED { + int32_t immed : 32; + } a5xx; + }; + + /* dword1: */ + uint32_t idx : 5; /* brac.N index */ + uint32_t brtype : 3; /* branch type, see brtype_t */ + uint32_t repeat : 3; + uint32_t dummy3 : 1; + uint32_t ss : 1; + uint32_t inv1 : 1; + uint32_t comp1 : 2; + uint32_t eq : 1; + uint32_t opc_hi : 1; /* at least one bit */ + uint32_t dummy4 : 2; + uint32_t inv0 : 1; + uint32_t comp0 : 2; /* component for first src */ + uint32_t opc : 4; + uint32_t jmp_tgt : 1; + uint32_t sync : 1; + uint32_t opc_cat : 3; +} instr_cat0_t; + +typedef struct PACKED { + /* dword0: */ + union PACKED { + /* for normal src register: */ + struct PACKED { + uint32_t src : 11; + /* at least low bit of pad must be zero or it will + * look like a address relative src + */ + uint32_t pad : 21; + }; + /* for address relative: */ + struct PACKED { + int32_t off : 10; + uint32_t src_rel_c : 1; + uint32_t src_rel : 1; + uint32_t unknown : 20; + }; + /* for immediate: */ + int32_t iim_val; + uint32_t uim_val; + float fim_val; + }; + + /* dword1: */ + uint32_t dst : 8; + uint32_t repeat : 3; + uint32_t src_r : 1; + uint32_t ss : 1; + uint32_t ul : 1; + uint32_t dst_type : 3; + uint32_t dst_rel : 1; + uint32_t src_type : 3; + uint32_t src_c : 1; + uint32_t src_im : 1; + uint32_t even : 1; + uint32_t pos_inf : 1; + uint32_t must_be_0 : 2; + uint32_t jmp_tgt : 1; + uint32_t sync : 1; + uint32_t opc_cat : 3; +} instr_cat1_t; + +typedef struct PACKED { + /* dword0: */ + union PACKED { + struct PACKED { + uint32_t src1 : 11; + uint32_t must_be_zero1: 2; + uint32_t src1_im : 1; /* immediate */ + uint32_t src1_neg : 1; /* negate */ + uint32_t src1_abs : 1; /* absolute value */ + }; + struct PACKED { + uint32_t src1 : 10; + uint32_t src1_c : 1; /* relative-const */ + uint32_t src1_rel : 1; /* relative address */ + uint32_t must_be_zero : 1; + uint32_t dummy : 3; + } rel1; + struct PACKED { + uint32_t src1 : 12; + uint32_t src1_c : 1; /* const */ + uint32_t dummy : 3; + } c1; + }; + + union PACKED { + struct PACKED { + uint32_t src2 : 11; + uint32_t must_be_zero2: 2; + uint32_t src2_im : 1; /* immediate */ + uint32_t src2_neg : 1; /* negate */ + uint32_t src2_abs : 1; /* absolute value */ + }; + struct PACKED { + uint32_t src2 : 10; + uint32_t src2_c : 1; /* relative-const */ + uint32_t src2_rel : 1; /* relative address */ + uint32_t must_be_zero : 1; + uint32_t dummy : 3; + } rel2; + struct PACKED { + uint32_t src2 : 12; + uint32_t src2_c : 1; /* const */ + uint32_t dummy : 3; + } c2; + }; + + /* dword1: */ + uint32_t dst : 8; + uint32_t repeat : 2; + uint32_t sat : 1; + uint32_t src1_r : 1; /* doubles as nop0 if repeat==0 */ + uint32_t ss : 1; + uint32_t ul : 1; /* dunno */ + uint32_t dst_half : 1; /* or widen/narrow.. ie. 
dst hrN <-> rN */ + uint32_t ei : 1; + uint32_t cond : 3; + uint32_t src2_r : 1; /* doubles as nop1 if repeat==0 */ + uint32_t full : 1; /* not half */ + uint32_t opc : 6; + uint32_t jmp_tgt : 1; + uint32_t sync : 1; + uint32_t opc_cat : 3; +} instr_cat2_t; + +typedef struct PACKED { + /* dword0: */ + union PACKED { + struct PACKED { + uint32_t src1 : 11; + uint32_t must_be_zero1: 2; + uint32_t src2_c : 1; + uint32_t src1_neg : 1; + uint32_t src2_r : 1; /* doubles as nop1 if repeat==0 */ + }; + struct PACKED { + uint32_t src1 : 10; + uint32_t src1_c : 1; + uint32_t src1_rel : 1; + uint32_t must_be_zero : 1; + uint32_t dummy : 3; + } rel1; + struct PACKED { + uint32_t src1 : 12; + uint32_t src1_c : 1; + uint32_t dummy : 3; + } c1; + }; + + union PACKED { + struct PACKED { + uint32_t src3 : 11; + uint32_t must_be_zero2: 2; + uint32_t src3_r : 1; + uint32_t src2_neg : 1; + uint32_t src3_neg : 1; + }; + struct PACKED { + uint32_t src3 : 10; + uint32_t src3_c : 1; + uint32_t src3_rel : 1; + uint32_t must_be_zero : 1; + uint32_t dummy : 3; + } rel2; + struct PACKED { + uint32_t src3 : 12; + uint32_t src3_c : 1; + uint32_t dummy : 3; + } c2; + }; + + /* dword1: */ + uint32_t dst : 8; + uint32_t repeat : 2; + uint32_t sat : 1; + uint32_t src1_r : 1; /* doubles as nop0 if repeat==0 */ + uint32_t ss : 1; + uint32_t ul : 1; + uint32_t dst_half : 1; /* or widen/narrow.. ie. dst hrN <-> rN */ + uint32_t src2 : 8; + uint32_t opc : 4; + uint32_t jmp_tgt : 1; + uint32_t sync : 1; + uint32_t opc_cat : 3; +} instr_cat3_t; + +static inline bool instr_cat3_full(instr_cat3_t *cat3) +{ + switch (_OPC(3, cat3->opc)) { + case OPC_MAD_F16: + case OPC_MAD_U16: + case OPC_MAD_S16: + case OPC_SEL_B16: + case OPC_SEL_S16: + case OPC_SEL_F16: + case OPC_SAD_S16: + case OPC_SAD_S32: // really?? + return false; + default: + return true; + } +} + +typedef struct PACKED { + /* dword0: */ + union PACKED { + struct PACKED { + uint32_t src : 11; + uint32_t must_be_zero1: 2; + uint32_t src_im : 1; /* immediate */ + uint32_t src_neg : 1; /* negate */ + uint32_t src_abs : 1; /* absolute value */ + }; + struct PACKED { + uint32_t src : 10; + uint32_t src_c : 1; /* relative-const */ + uint32_t src_rel : 1; /* relative address */ + uint32_t must_be_zero : 1; + uint32_t dummy : 3; + } rel; + struct PACKED { + uint32_t src : 12; + uint32_t src_c : 1; /* const */ + uint32_t dummy : 3; + } c; + }; + uint32_t dummy1 : 16; /* seem to be ignored */ + + /* dword1: */ + uint32_t dst : 8; + uint32_t repeat : 2; + uint32_t sat : 1; + uint32_t src_r : 1; + uint32_t ss : 1; + uint32_t ul : 1; + uint32_t dst_half : 1; /* or widen/narrow.. ie. dst hrN <-> rN */ + uint32_t dummy2 : 5; /* seem to be ignored */ + uint32_t full : 1; /* not half */ + uint32_t opc : 6; + uint32_t jmp_tgt : 1; + uint32_t sync : 1; + uint32_t opc_cat : 3; +} instr_cat4_t; + +/* With is_bindless_s2en = 1, this determines whether bindless is enabled and + * if so, how to get the (base, index) pair for both sampler and texture. + * There is a single base embedded in the instruction, which is always used + * for the texture. + */ +typedef enum { + /* Use traditional GL binding model, get texture and sampler index + * from src3 which is not presumed to be uniform. This is + * backwards-compatible with earlier generations, where this field was + * always 0 and nonuniform-indexed sampling always worked. + */ + CAT5_NONUNIFORM = 0, + + /* The sampler base comes from the low 3 bits of a1.x, and the sampler + * and texture index come from src3 which is presumed to be uniform. 
+ */ + CAT5_BINDLESS_A1_UNIFORM = 1, + + /* The texture and sampler share the same base, and the sampler and + * texture index come from src3 which is *not* presumed to be uniform. + */ + CAT5_BINDLESS_NONUNIFORM = 2, + + /* The sampler base comes from the low 3 bits of a1.x, and the sampler + * and texture index come from src3 which is *not* presumed to be + * uniform. + */ + CAT5_BINDLESS_A1_NONUNIFORM = 3, + + /* Use traditional GL binding model, get texture and sampler index + * from src3 which is presumed to be uniform. + */ + CAT5_UNIFORM = 4, + + /* The texture and sampler share the same base, and the sampler and + * texture index come from src3 which is presumed to be uniform. + */ + CAT5_BINDLESS_UNIFORM = 5, + + /* The texture and sampler share the same base, get sampler index from low + * 4 bits of src3 and texture index from high 4 bits. + */ + CAT5_BINDLESS_IMM = 6, + + /* The sampler base comes from the low 3 bits of a1.x, and the texture + * index comes from the next 8 bits of a1.x. The sampler index is an + * immediate in src3. + */ + CAT5_BINDLESS_A1_IMM = 7, +} cat5_desc_mode_t; + +typedef struct PACKED { + /* dword0: */ + union PACKED { + /* normal case: */ + struct PACKED { + uint32_t full : 1; /* not half */ + uint32_t src1 : 8; + uint32_t src2 : 8; + uint32_t dummy1 : 4; /* seem to be ignored */ + uint32_t samp : 4; + uint32_t tex : 7; + } norm; + /* s2en case: */ + struct PACKED { + uint32_t full : 1; /* not half */ + uint32_t src1 : 8; + uint32_t src2 : 8; + uint32_t dummy1 : 2; + uint32_t base_hi : 2; + uint32_t src3 : 8; + uint32_t desc_mode : 3; + } s2en_bindless; + /* same in either case: */ + // XXX I think, confirm this + struct PACKED { + uint32_t full : 1; /* not half */ + uint32_t src1 : 8; + uint32_t src2 : 8; + uint32_t pad : 15; + }; + }; + + /* dword1: */ + uint32_t dst : 8; + uint32_t wrmask : 4; /* write-mask */ + uint32_t type : 3; + uint32_t base_lo : 1; /* used with bindless */ + uint32_t is_3d : 1; + + uint32_t is_a : 1; + uint32_t is_s : 1; + uint32_t is_s2en_bindless : 1; + uint32_t is_o : 1; + uint32_t is_p : 1; + + uint32_t opc : 5; + uint32_t jmp_tgt : 1; + uint32_t sync : 1; + uint32_t opc_cat : 3; +} instr_cat5_t; + +/* dword0 encoding for src_off: [src1 + off], src2: */ +typedef struct PACKED { + /* dword0: */ + uint32_t mustbe1 : 1; + int32_t off : 13; + uint32_t src1 : 8; + uint32_t src1_im : 1; + uint32_t src2_im : 1; + uint32_t src2 : 8; + + /* dword1: */ + uint32_t dword1; +} instr_cat6a_t; + +/* dword0 encoding for !src_off: [src1], src2 */ +typedef struct PACKED { + /* dword0: */ + uint32_t mustbe0 : 1; + uint32_t src1 : 13; + uint32_t ignore0 : 8; + uint32_t src1_im : 1; + uint32_t src2_im : 1; + uint32_t src2 : 8; + + /* dword1: */ + uint32_t dword1; +} instr_cat6b_t; + +/* dword1 encoding for dst_off: */ +typedef struct PACKED { + /* dword0: */ + uint32_t dword0; + + /* note: there is some weird stuff going on where sometimes + * cat6->a.off is involved.. but that seems like a bug in + * the blob, since it is used even if !cat6->src_off + * It would make sense for there to be some more bits to + * bring us to 11 bits worth of offset, but not sure.. + */ + int32_t off : 8; + uint32_t mustbe1 : 1; + uint32_t dst : 8; + uint32_t pad1 : 15; +} instr_cat6c_t; + +/* dword1 encoding for !dst_off: */ +typedef struct PACKED { + /* dword0: */ + uint32_t dword0; + + uint32_t dst : 8; + uint32_t mustbe0 : 1; + uint32_t idx : 8; + uint32_t pad0 : 15; +} instr_cat6d_t; + +/* ldgb and atomics.. 
+ * + * ldgb: pad0=0, pad3=1 + * atomic .g: pad0=1, pad3=1 + * .l: pad0=1, pad3=0 + */ +typedef struct PACKED { + /* dword0: */ + uint32_t pad0 : 1; + uint32_t src3 : 8; + uint32_t d : 2; + uint32_t typed : 1; + uint32_t type_size : 2; + uint32_t src1 : 8; + uint32_t src1_im : 1; + uint32_t src2_im : 1; + uint32_t src2 : 8; + + /* dword1: */ + uint32_t dst : 8; + uint32_t mustbe0 : 1; + uint32_t src_ssbo : 8; + uint32_t pad2 : 3; // type + uint32_t g : 1; + uint32_t pad3 : 1; + uint32_t pad4 : 10; // opc/jmp_tgt/sync/opc_cat +} instr_cat6ldgb_t; + +/* stgb, pad0=0, pad3=2 + */ +typedef struct PACKED { + /* dword0: */ + uint32_t mustbe1 : 1; // ??? + uint32_t src1 : 8; + uint32_t d : 2; + uint32_t typed : 1; + uint32_t type_size : 2; + uint32_t pad0 : 9; + uint32_t src2_im : 1; + uint32_t src2 : 8; + + /* dword1: */ + uint32_t src3 : 8; + uint32_t src3_im : 1; + uint32_t dst_ssbo : 8; + uint32_t pad2 : 3; // type + uint32_t pad3 : 2; + uint32_t pad4 : 10; // opc/jmp_tgt/sync/opc_cat +} instr_cat6stgb_t; + +typedef union PACKED { + instr_cat6a_t a; + instr_cat6b_t b; + instr_cat6c_t c; + instr_cat6d_t d; + instr_cat6ldgb_t ldgb; + instr_cat6stgb_t stgb; + struct PACKED { + /* dword0: */ + uint32_t src_off : 1; + uint32_t pad1 : 31; + + /* dword1: */ + uint32_t pad2 : 8; + uint32_t dst_off : 1; + uint32_t pad3 : 8; + uint32_t type : 3; + uint32_t g : 1; /* or in some cases it means dst immed */ + uint32_t pad4 : 1; + uint32_t opc : 5; + uint32_t jmp_tgt : 1; + uint32_t sync : 1; + uint32_t opc_cat : 3; + }; +} instr_cat6_t; + +/* Similar to cat5_desc_mode_t, describes how the descriptor is loaded. + */ +typedef enum { + /* Use old GL binding model with an immediate index. */ + CAT6_IMM = 0, + + CAT6_UNIFORM = 1, + + CAT6_NONUNIFORM = 2, + + /* Use the bindless model, with an immediate index. + */ + CAT6_BINDLESS_IMM = 4, + + /* Use the bindless model, with a uniform register index. + */ + CAT6_BINDLESS_UNIFORM = 5, + + /* Use the bindless model, with a register index that isn't guaranteed + * to be uniform. This presumably checks if the indices are equal and + * splits up the load/store, because it works the way you would + * expect. + */ + CAT6_BINDLESS_NONUNIFORM = 6, +} cat6_desc_mode_t; + +/** + * For atomic ops (which return a value): + * + * pad1=1, pad3=c, pad5=3 + * src1 - vecN offset/coords + * src2.x - is actually dest register + * src2.y - is 'data' except for cmpxchg where src2.y is 'compare' + * and src2.z is 'data' + * + * For stib (which does not return a value): + * pad1=0, pad3=c, pad5=2 + * src1 - vecN offset/coords + * src2 - value to store + * + * For ldib: + * pad1=1, pad3=c, pad5=2 + * src1 - vecN offset/coords + * + * for ldc (load from UBO using descriptor): + * pad1=0, pad3=8, pad5=2 + * + * pad2 and pad5 are only observed to be 0. + */ +typedef struct PACKED { + /* dword0: */ + uint32_t pad1 : 1; + uint32_t base : 3; + uint32_t pad2 : 2; + uint32_t desc_mode : 3; + uint32_t d : 2; + uint32_t typed : 1; + uint32_t type_size : 2; + uint32_t opc : 5; + uint32_t pad3 : 5; + uint32_t src1 : 8; /* coordinate/offset */ + + /* dword1: */ + uint32_t src2 : 8; /* or the dst for load instructions */ + uint32_t pad4 : 1; //mustbe0 ?? 
+	uint32_t ssbo      : 8;  /* ssbo/image binding point */
+	uint32_t type      : 3;
+	uint32_t pad5      : 7;
+	uint32_t jmp_tgt   : 1;
+	uint32_t sync      : 1;
+	uint32_t opc_cat   : 3;
+} instr_cat6_a6xx_t;
+
+typedef struct PACKED {
+	/* dword0: */
+	uint32_t pad1     : 32;
+
+	/* dword1: */
+	uint32_t pad2     : 12;
+	uint32_t ss       : 1;  /* maybe in the encoding, but blob only uses (sy) */
+	uint32_t pad3     : 6;
+	uint32_t w        : 1;  /* write */
+	uint32_t r        : 1;  /* read */
+	uint32_t l        : 1;  /* local */
+	uint32_t g        : 1;  /* global */
+	uint32_t opc      : 4;  /* presumed, but only a couple known OPCs */
+	uint32_t jmp_tgt  : 1;  /* (jp) */
+	uint32_t sync     : 1;  /* (sy) */
+	uint32_t opc_cat  : 3;
+} instr_cat7_t;
+
+typedef union PACKED {
+	instr_cat0_t cat0;
+	instr_cat1_t cat1;
+	instr_cat2_t cat2;
+	instr_cat3_t cat3;
+	instr_cat4_t cat4;
+	instr_cat5_t cat5;
+	instr_cat6_t cat6;
+	instr_cat6_a6xx_t cat6_a6xx;
+	instr_cat7_t cat7;
+	struct PACKED {
+		/* dword0: */
+		uint32_t pad1     : 32;
+
+		/* dword1: */
+		uint32_t pad2     : 12;
+		uint32_t ss       : 1;  /* cat1-cat4 (cat0??) and cat7 (?) */
+		uint32_t ul       : 1;  /* cat2-cat4 (and cat1 in blob.. which may be a bug??) */
+		uint32_t pad3     : 13;
+		uint32_t jmp_tgt  : 1;
+		uint32_t sync     : 1;
+		uint32_t opc_cat  : 3;
+
+	};
+} instr_t;
+
+static inline uint32_t instr_repeat(instr_t *instr)
+{
+	switch (instr->opc_cat) {
+	case 0:  return instr->cat0.repeat;
+	case 1:  return instr->cat1.repeat;
+	case 2:  return instr->cat2.repeat;
+	case 3:  return instr->cat3.repeat;
+	case 4:  return instr->cat4.repeat;
+	default: return 0;
+	}
+}
+
+static inline bool instr_sat(instr_t *instr)
+{
+	switch (instr->opc_cat) {
+	case 2:  return instr->cat2.sat;
+	case 3:  return instr->cat3.sat;
+	case 4:  return instr->cat4.sat;
+	default: return false;
+	}
+}
+
+/* We can probably drop the gpu_id arg, but keeping it for now so we can
+ * assert if we see something we think should be new encoding on an older
+ * gpu.
+ */
+static inline bool is_cat6_legacy(instr_t *instr, unsigned gpu_id)
+{
+	instr_cat6_a6xx_t *cat6 = &instr->cat6_a6xx;
+
+	/* At least one of these two bits is pad in all the possible
+	 * "legacy" cat6 encodings, and an analysis of all the pre-a6xx
+	 * cmdstream traces I have indicates that the pad bit is zero
+	 * in all cases.
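+	 * (Per the structs above: pad3 & 0x8 is dword0 bit 22 and
+	 * pad5 & 0x2 is dword1 bit 21; bit 21 falls in a pad field in
+	 * every legacy cat6 layout.)
+	 *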
So we can use this to detect new encoding: + */ + if ((cat6->pad3 & 0x8) && (cat6->pad5 & 0x2)) { + assert(gpu_id >= 600); + assert(instr->cat6.opc == 0); + return false; + } + + return true; +} + +static inline uint32_t instr_opc(instr_t *instr, unsigned gpu_id) +{ + switch (instr->opc_cat) { + case 0: return instr->cat0.opc | instr->cat0.opc_hi << 4; + case 1: return 0; + case 2: return instr->cat2.opc; + case 3: return instr->cat3.opc; + case 4: return instr->cat4.opc; + case 5: return instr->cat5.opc; + case 6: + if (!is_cat6_legacy(instr, gpu_id)) + return instr->cat6_a6xx.opc; + return instr->cat6.opc; + case 7: return instr->cat7.opc; + default: return 0; + } +} + +static inline bool is_mad(opc_t opc) +{ + switch (opc) { + case OPC_MAD_U16: + case OPC_MAD_S16: + case OPC_MAD_U24: + case OPC_MAD_S24: + case OPC_MAD_F16: + case OPC_MAD_F32: + return true; + default: + return false; + } +} + +static inline bool is_madsh(opc_t opc) +{ + switch (opc) { + case OPC_MADSH_U16: + case OPC_MADSH_M16: + return true; + default: + return false; + } +} + +static inline bool is_atomic(opc_t opc) +{ + switch (opc) { + case OPC_ATOMIC_ADD: + case OPC_ATOMIC_SUB: + case OPC_ATOMIC_XCHG: + case OPC_ATOMIC_INC: + case OPC_ATOMIC_DEC: + case OPC_ATOMIC_CMPXCHG: + case OPC_ATOMIC_MIN: + case OPC_ATOMIC_MAX: + case OPC_ATOMIC_AND: + case OPC_ATOMIC_OR: + case OPC_ATOMIC_XOR: + return true; + default: + return false; + } +} + +static inline bool is_ssbo(opc_t opc) +{ + switch (opc) { + case OPC_RESFMT: + case OPC_RESINFO: + case OPC_LDGB: + case OPC_STGB: + case OPC_STIB: + return true; + default: + return false; + } +} + +static inline bool is_isam(opc_t opc) +{ + switch (opc) { + case OPC_ISAM: + case OPC_ISAML: + case OPC_ISAMM: + return true; + default: + return false; + } +} + + +static inline bool is_cat2_float(opc_t opc) +{ + switch (opc) { + case OPC_ADD_F: + case OPC_MIN_F: + case OPC_MAX_F: + case OPC_MUL_F: + case OPC_SIGN_F: + case OPC_CMPS_F: + case OPC_ABSNEG_F: + case OPC_CMPV_F: + case OPC_FLOOR_F: + case OPC_CEIL_F: + case OPC_RNDNE_F: + case OPC_RNDAZ_F: + case OPC_TRUNC_F: + return true; + + default: + return false; + } +} + +static inline bool is_cat3_float(opc_t opc) +{ + switch (opc) { + case OPC_MAD_F16: + case OPC_MAD_F32: + case OPC_SEL_F16: + case OPC_SEL_F32: + return true; + default: + return false; + } +} + +int disasm_a3xx(uint32_t *dwords, int sizedwords, int level, FILE *out, unsigned gpu_id); + +#endif /* INSTR_A3XX_H_ */ diff --git a/selfdrive/modeld/thneed/debug/decompiler/ir3.h b/selfdrive/modeld/thneed/debug/decompiler/ir3.h new file mode 100644 index 0000000000..278dc96362 --- /dev/null +++ b/selfdrive/modeld/thneed/debug/decompiler/ir3.h @@ -0,0 +1,1755 @@ +/* + * Copyright (c) 2013 Rob Clark + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef IR3_H_
+#define IR3_H_
+
+#include <stdbool.h>
+#include <stdio.h>
+
+#include "shader_enums.h"
+#include "util/list.h"
+
+/*#include "util/bitscan.h"
+#include "util/list.h"
+#include "util/set.h"
+#include "util/u_debug.h"*/
+
+#include "instr-a3xx.h"
+
+/* low level intermediate representation of an adreno shader program */
+
+struct ir3_compiler;
+struct ir3;
+struct ir3_instruction;
+struct ir3_block;
+
+struct ir3_info {
+	uint32_t gpu_id;
+	uint16_t sizedwords;
+	uint16_t instrs_count;   /* expanded to account for rpt's */
+	uint16_t nops_count;     /* # of nop instructions, including nopN */
+	uint16_t mov_count;
+	uint16_t cov_count;
+	/* NOTE: max_reg, etc, does not include registers not touched
+	 * by the shader (ie. vertex fetched via VFD_DECODE but not
+	 * touched by shader)
+	 */
+	int8_t   max_reg;   /* highest GPR # used by shader */
+	int8_t   max_half_reg;
+	int16_t  max_const;
+
+	/* number of sync bits: */
+	uint16_t ss, sy;
+
+	/* estimate of number of cycles stalled on (ss) */
+	uint16_t sstall;
+
+	uint16_t last_baryf;     /* instruction # of last varying fetch */
+};
+
+struct ir3_register {
+	enum {
+		IR3_REG_CONST  = 0x001,
+		IR3_REG_IMMED  = 0x002,
+		IR3_REG_HALF   = 0x004,
+		/* high registers are used for some things in compute shaders,
+		 * for example. Seems to be for things that are global to all
+		 * threads in a wave, so possibly these are global/shared by
+		 * all the threads in the wave?
+		 */
+		IR3_REG_HIGH   = 0x008,
+		IR3_REG_RELATIV= 0x010,
+		IR3_REG_R      = 0x020,
+		/* Most instructions, it seems, can do float abs/neg but not
+		 * integer. The CP pass needs to know what is intended (int or
+		 * float) in order to do the right thing. For this reason the
+		 * abs/neg flags are split out into float and int variants. In
+		 * addition, for .b (bitwise) operations, the negate is actually
+		 * a bitwise not, so split that out into a new flag to make it
+		 * more clear.
+		 */
+		IR3_REG_FNEG   = 0x040,
+		IR3_REG_FABS   = 0x080,
+		IR3_REG_SNEG   = 0x100,
+		IR3_REG_SABS   = 0x200,
+		IR3_REG_BNOT   = 0x400,
+		IR3_REG_EVEN   = 0x800,
+		IR3_REG_POS_INF= 0x1000,
+		/* (ei) flag, end-input? Set on last bary, presumably to signal
+		 * that the shader needs no more input:
+		 */
+		IR3_REG_EI     = 0x2000,
+		/* meta-flags, for intermediate stages of IR, ie.
+		 * before register assignment is done:
+		 */
+		IR3_REG_SSA    = 0x4000,   /* 'instr' is ptr to assigning instr */
+		IR3_REG_ARRAY  = 0x8000,
+
+	} flags;
+
+	/* used for cat5 instructions, but also for internal/IR level
+	 * tracking of what registers are read/written by an instruction.
+	 * wrmask may be a bad name since it is used to represent both
+	 * src and dst that touch multiple adjacent registers.
+	 */
+	unsigned wrmask : 16;  /* up to vec16 */
+
+	/* for relative addressing, 32bits for array size is too small,
+	 * but otoh we don't need to deal with disjoint sets, so instead
+	 * use a simple size field (number of scalar components).
+	 *
+	 * Note the size field isn't important for relative const (since
+	 * we don't have to do register allocation for constants).
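+	 * (For illustration: an indirectly-addressed float[4] array would
+	 * have size = 4; a vec4[4] array, size = 16 scalar components.)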
+ */ + unsigned size : 15; + + bool merged : 1; /* half-regs conflict with full regs (ie >= a6xx) */ + + /* normal registers: + * the component is in the low two bits of the reg #, so + * rN.x becomes: (N << 2) | x + */ + uint16_t num; + union { + /* immediate: */ + int32_t iim_val; + uint32_t uim_val; + float fim_val; + /* relative: */ + struct { + uint16_t id; + int16_t offset; + } array; + }; + + /* For IR3_REG_SSA, src registers contain ptr back to assigning + * instruction. + * + * For IR3_REG_ARRAY, the pointer is back to the last dependent + * array access (although the net effect is the same, it points + * back to a previous instruction that we depend on). + */ + struct ir3_instruction *instr; +}; + +/* + * Stupid/simple growable array implementation: + */ +#define DECLARE_ARRAY(type, name) \ + unsigned name ## _count, name ## _sz; \ + type * name; + +#define array_insert(ctx, arr, val) do { \ + if (arr ## _count == arr ## _sz) { \ + arr ## _sz = MAX2(2 * arr ## _sz, 16); \ + arr = reralloc_size(ctx, arr, arr ## _sz * sizeof(arr[0])); \ + } \ + arr[arr ##_count++] = val; \ + } while (0) + +struct ir3_instruction { + struct ir3_block *block; + opc_t opc; + enum { + /* (sy) flag is set on first instruction, and after sample + * instructions (probably just on RAW hazard). + */ + IR3_INSTR_SY = 0x001, + /* (ss) flag is set on first instruction, and first instruction + * to depend on the result of "long" instructions (RAW hazard): + * + * rcp, rsq, log2, exp2, sin, cos, sqrt + * + * It seems to synchronize until all in-flight instructions are + * completed, for example: + * + * rsq hr1.w, hr1.w + * add.f hr2.z, (neg)hr2.z, hc0.y + * mul.f hr2.w, (neg)hr2.y, (neg)hr2.y + * rsq hr2.x, hr2.x + * (rpt1)nop + * mad.f16 hr2.w, hr2.z, hr2.z, hr2.w + * nop + * mad.f16 hr2.w, (neg)hr0.w, (neg)hr0.w, hr2.w + * (ss)(rpt2)mul.f hr1.x, (r)hr1.x, hr1.w + * (rpt2)mul.f hr0.x, (neg)(r)hr0.x, hr2.x + * + * The last mul.f does not have (ss) set, presumably because the + * (ss) on the previous instruction does the job. + * + * The blob driver also seems to set it on WAR hazards, although + * not really clear if this is needed or just blob compiler being + * sloppy. So far I haven't found a case where removing the (ss) + * causes problems for WAR hazard, but I could just be getting + * lucky: + * + * rcp r1.y, r3.y + * (ss)(rpt2)mad.f32 r3.y, (r)c9.x, r1.x, (r)r3.z + * + */ + IR3_INSTR_SS = 0x002, + /* (jp) flag is set on jump targets: + */ + IR3_INSTR_JP = 0x004, + IR3_INSTR_UL = 0x008, + IR3_INSTR_3D = 0x010, + IR3_INSTR_A = 0x020, + IR3_INSTR_O = 0x040, + IR3_INSTR_P = 0x080, + IR3_INSTR_S = 0x100, + IR3_INSTR_S2EN = 0x200, + IR3_INSTR_G = 0x400, + IR3_INSTR_SAT = 0x800, + /* (cat5/cat6) Bindless */ + IR3_INSTR_B = 0x1000, + /* (cat5-only) Get some parts of the encoding from a1.x */ + IR3_INSTR_A1EN = 0x2000, + /* meta-flags, for intermediate stages of IR, ie. 
+	 * before register assignment is done:
+	 */
+	IR3_INSTR_MARK   = 0x4000,
+	IR3_INSTR_UNUSED = 0x8000,
+	} flags;
+	uint8_t repeat;
+	uint8_t nop;
+#ifdef DEBUG
+	unsigned regs_max;
+#endif
+	unsigned regs_count;
+	struct ir3_register **regs;
+	union {
+		struct {
+			char inv;
+			char comp;
+			int  immed;
+			struct ir3_block *target;
+		} cat0;
+		struct {
+			type_t src_type, dst_type;
+		} cat1;
+		struct {
+			enum {
+				IR3_COND_LT = 0,
+				IR3_COND_LE = 1,
+				IR3_COND_GT = 2,
+				IR3_COND_GE = 3,
+				IR3_COND_EQ = 4,
+				IR3_COND_NE = 5,
+			} condition;
+		} cat2;
+		struct {
+			unsigned samp, tex;
+			unsigned tex_base : 3;
+			type_t type;
+		} cat5;
+		struct {
+			type_t type;
+			int src_offset;
+			int dst_offset;
+			int iim_val : 3;      /* for ldgb/stgb, # of components */
+			unsigned d : 3;       /* for ldc, component offset */
+			bool typed : 1;
+			unsigned base : 3;
+		} cat6;
+		struct {
+			unsigned w : 1;       /* write */
+			unsigned r : 1;       /* read */
+			unsigned l : 1;       /* local */
+			unsigned g : 1;       /* global */
+		} cat7;
+		/* for meta-instructions, just used to hold extra data
+		 * before instruction scheduling, etc
+		 */
+		struct {
+			int off;              /* component/offset */
+		} split;
+		struct {
+			/* for output collects, this maps back to the entry in the
+			 * ir3_shader_variant::outputs table.
+			 */
+			int outidx;
+		} collect;
+		struct {
+			unsigned samp, tex;
+			unsigned input_offset;
+			unsigned samp_base : 3;
+			unsigned tex_base : 3;
+		} prefetch;
+		struct {
+			/* maps back to entry in ir3_shader_variant::inputs table: */
+			int inidx;
+			/* for sysvals, identifies the sysval type. Mostly so we can
+			 * identify the special cases where a sysval should not be DCE'd
+			 * (currently, just pre-fs texture fetch)
+			 */
+			gl_system_value sysval;
+		} input;
+	};
+
+	/* When we get to the RA stage, we need instruction's position/name: */
+	uint16_t ip;
+	uint16_t name;
+
+	/* used for per-pass extra instruction data.
+	 *
+	 * TODO we should remove the per-pass data like this and 'use_count'
+	 * and do something similar to what RA does w/ ir3_ra_instr_data..
+	 * ie. use the ir3_count_instructions pass, and then use instr->ip
+	 * to index into a table of pass-private data.
+	 */
+	void *data;
+
+	/**
+	 * Valid if pass calls ir3_find_ssa_uses().. see foreach_ssa_use()
+	 */
+	struct set *uses;
+
+	int sun;            /* Sethi–Ullman number, used by sched */
+	int use_count;      /* currently just updated/used by cp */
+
+	/* Used during CP and RA stages. For collect and shader inputs/
+	 * outputs where we need a sequence of consecutive registers,
+	 * keep track of each src instruction's left (ie 'n-1') and right
+	 * (ie 'n+1') neighbor. The front-end must insert enough mov's
+	 * to ensure that each instruction has at most one left and at
+	 * most one right neighbor. During the copy-propagation pass,
+	 * we only remove mov's when we can preserve this constraint.
+	 * And during the RA stage, we use the neighbor information to
+	 * allocate a block of registers in one shot.
+	 *
+	 * TODO: maybe just add something like:
+	 *   struct ir3_instruction_ref {
+	 *     struct ir3_instruction *instr;
+	 *     unsigned cnt;
+	 *   }
+	 *
+	 * Or can we get away without the refcnt stuff? It seems like
+	 * it should be overkill.. the problem is if, after potentially
+	 * already eliminating some mov's, you have a single mov that
+	 * needs to be grouped with its neighbors in two different
+	 * places (ex. shader output and a collect).
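+	 *
+	 * (Sketch of the invariant: for a vec3 collect of movs m1, m2, m3,
+	 * the front-end links m1 <-> m2 <-> m3, so ir3_neighbor_first(m3)
+	 * walks left back to m1 and ir3_neighbor_count(m1) returns 3 --
+	 * see the helpers below.)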
+	 */
+	struct {
+		struct ir3_instruction *left, *right;
+		uint16_t left_cnt, right_cnt;
+	} cp;
+
+	/* an instruction can reference at most one address register amongst
+	 * its src/dst registers. Beyond that, you need to insert mov's.
+	 *
+	 * NOTE: do not write this directly, use ir3_instr_set_address()
+	 */
+	struct ir3_instruction *address;
+
+	/* Tracking for additional dependent instructions. Used to handle
+	 * barriers, WAR hazards for arrays/SSBOs/etc.
+	 */
+	DECLARE_ARRAY(struct ir3_instruction *, deps);
+
+	/*
+	 * From PoV of instruction scheduling, not execution (ie. ignores global/
+	 * local distinction):
+	 *                            shared  image  atomic  SSBO  everything
+	 *   barrier()/            -   R/W     R/W    R/W     R/W      X
+	 *     groupMemoryBarrier()
+	 *   memoryBarrier()       -           R/W    R/W
+	 *     (but only images declared coherent?)
+	 *   memoryBarrierAtomic() -                  R/W
+	 *   memoryBarrierBuffer() -                          R/W
+	 *   memoryBarrierImage()  -           R/W
+	 *   memoryBarrierShared() -   R/W
+	 *
+	 * TODO I think for SSBO/image/shared, in cases where we can determine
+	 * which variable is accessed, we don't need to care about accesses to
+	 * different variables (unless declared coherent??)
+	 */
+	enum {
+		IR3_BARRIER_EVERYTHING = 1 << 0,
+		IR3_BARRIER_SHARED_R   = 1 << 1,
+		IR3_BARRIER_SHARED_W   = 1 << 2,
+		IR3_BARRIER_IMAGE_R    = 1 << 3,
+		IR3_BARRIER_IMAGE_W    = 1 << 4,
+		IR3_BARRIER_BUFFER_R   = 1 << 5,
+		IR3_BARRIER_BUFFER_W   = 1 << 6,
+		IR3_BARRIER_ARRAY_R    = 1 << 7,
+		IR3_BARRIER_ARRAY_W    = 1 << 8,
+	} barrier_class, barrier_conflict;
+
+	/* Entry in ir3_block's instruction list: */
+	struct list_head node;
+
+#ifdef DEBUG
+	uint32_t serialno;
+#endif
+
+	// TODO only computerator/assembler:
+	int line;
+};
+
+static inline struct ir3_instruction *
+ir3_neighbor_first(struct ir3_instruction *instr)
+{
+	int cnt = 0;
+	while (instr->cp.left) {
+		instr = instr->cp.left;
+		if (++cnt > 0xffff) {
+			debug_assert(0);
+			break;
+		}
+	}
+	return instr;
+}
+
+static inline int ir3_neighbor_count(struct ir3_instruction *instr)
+{
+	int num = 1;
+
+	debug_assert(!instr->cp.left);
+
+	while (instr->cp.right) {
+		num++;
+		instr = instr->cp.right;
+		if (num > 0xffff) {
+			debug_assert(0);
+			break;
+		}
+	}
+
+	return num;
+}
+
+struct ir3 {
+	struct ir3_compiler *compiler;
+	gl_shader_stage type;
+
+	DECLARE_ARRAY(struct ir3_instruction *, inputs);
+	DECLARE_ARRAY(struct ir3_instruction *, outputs);
+
+	/* Track bary.f (and ldlv) instructions.. this is needed in
+	 * scheduling to ensure that all varying fetches happen before
+	 * any potential kill instructions. The hw gets grumpy if all
+	 * threads in a group are killed before the last bary.f gets
+	 * a chance to signal end of input (ei).
+	 */
+	DECLARE_ARRAY(struct ir3_instruction *, baryfs);
+
+	/* Track all indirect instructions (read and write). To avoid
+	 * deadlock scenario where an address register gets scheduled,
+	 * but other dependent src instructions cannot be scheduled due
+	 * to dependency on a *different* address register value, the
+	 * scheduler needs to ensure that all of an instruction's
+	 * dependencies other than the address register are scheduled
+	 * before the one that writes the address register. Having a
+	 * convenient list of instructions that reference some address
+	 * register simplifies this.
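+	 * (These lists appear to be maintained by ir3_instr_set_address(),
+	 * declared later in this header.)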
+	 */
+	DECLARE_ARRAY(struct ir3_instruction *, a0_users);
+
+	/* same for a1.x: */
+	DECLARE_ARRAY(struct ir3_instruction *, a1_users);
+
+	/* and same for instructions that consume predicate register: */
+	DECLARE_ARRAY(struct ir3_instruction *, predicates);
+
+	/* Track texture sample instructions which need texture state
+	 * patched in (for astc-srgb workaround):
+	 */
+	DECLARE_ARRAY(struct ir3_instruction *, astc_srgb);
+
+	/* List of blocks: */
+	struct list_head block_list;
+
+	/* List of ir3_array's: */
+	struct list_head array_list;
+
+	unsigned max_sun;   /* max Sethi–Ullman number */
+
+#ifdef DEBUG
+	unsigned block_count, instr_count;
+#endif
+};
+
+struct ir3_array {
+	struct list_head node;
+	unsigned length;
+	unsigned id;
+
+	struct nir_register *r;
+
+	/* To avoid array writes from getting DCE'd, keep track of the
+	 * most recent write. Any array access depends on the most
+	 * recent write. This way, nothing depends on writes after the
+	 * last read. But all the writes that happen before that have
+	 * something depending on them.
+	 */
+	struct ir3_instruction *last_write;
+
+	/* extra stuff used in RA pass: */
+	unsigned base;      /* base vreg name */
+	unsigned reg;       /* base physical reg */
+	uint16_t start_ip, end_ip;
+
+	/* Indicates if half-precision */
+	bool half;
+};
+
+struct ir3_array * ir3_lookup_array(struct ir3 *ir, unsigned id);
+
+struct ir3_block {
+	struct list_head node;
+	struct ir3 *shader;
+
+	const struct nir_block *nblock;
+
+	struct list_head instr_list;  /* list of ir3_instruction */
+
+	/* each block has either one or two successors.. in case of
+	 * two successors, 'condition' decides which one to follow.
+	 * A block preceding an if/else has two successors.
+	 */
+	struct ir3_instruction *condition;
+	struct ir3_block *successors[2];
+
+	struct set *predecessors;     /* set of ir3_block */
+
+	uint16_t start_ip, end_ip;
+
+	/* Track instructions which do not write a register but other-
+	 * wise must not be discarded (such as kill, stg, etc)
+	 */
+	DECLARE_ARRAY(struct ir3_instruction *, keeps);
+
+	/* used for per-pass extra block data. Mainly used right
+	 * now in RA step to track livein/liveout.
+ */ + void *data; + +#ifdef DEBUG + uint32_t serialno; +#endif +}; + +static inline uint32_t +block_id(struct ir3_block *block) +{ +#ifdef DEBUG + return block->serialno; +#else + return (uint32_t)(unsigned long)block; +#endif +} + +struct ir3 * ir3_create(struct ir3_compiler *compiler, gl_shader_stage type); +void ir3_destroy(struct ir3 *shader); +void * ir3_assemble(struct ir3 *shader, + struct ir3_info *info, uint32_t gpu_id); +void * ir3_alloc(struct ir3 *shader, int sz); + +struct ir3_block * ir3_block_create(struct ir3 *shader); + +struct ir3_instruction * ir3_instr_create(struct ir3_block *block, opc_t opc); +struct ir3_instruction * ir3_instr_create2(struct ir3_block *block, + opc_t opc, int nreg); +struct ir3_instruction * ir3_instr_clone(struct ir3_instruction *instr); +void ir3_instr_add_dep(struct ir3_instruction *instr, struct ir3_instruction *dep); +const char *ir3_instr_name(struct ir3_instruction *instr); + +struct ir3_register * ir3_reg_create(struct ir3_instruction *instr, + int num, int flags); +struct ir3_register * ir3_reg_clone(struct ir3 *shader, + struct ir3_register *reg); + +void ir3_instr_set_address(struct ir3_instruction *instr, + struct ir3_instruction *addr); + +static inline bool ir3_instr_check_mark(struct ir3_instruction *instr) +{ + if (instr->flags & IR3_INSTR_MARK) + return true; /* already visited */ + instr->flags |= IR3_INSTR_MARK; + return false; +} + +void ir3_block_clear_mark(struct ir3_block *block); +void ir3_clear_mark(struct ir3 *shader); + +unsigned ir3_count_instructions(struct ir3 *ir); +unsigned ir3_count_instructions_ra(struct ir3 *ir); + +void ir3_find_ssa_uses(struct ir3 *ir, void *mem_ctx, bool falsedeps); + +//#include "util/set.h" +#define foreach_ssa_use(__use, __instr) \ + for (struct ir3_instruction *__use = (void *)~0; \ + __use && (__instr)->uses; __use = NULL) \ + set_foreach ((__instr)->uses, __entry) \ + if ((__use = (void *)__entry->key)) + +#define MAX_ARRAYS 16 + +/* comp: + * 0 - x + * 1 - y + * 2 - z + * 3 - w + */ +static inline uint32_t regid(int num, int comp) +{ + return (num << 2) | (comp & 0x3); +} + +static inline uint32_t reg_num(struct ir3_register *reg) +{ + return reg->num >> 2; +} + +static inline uint32_t reg_comp(struct ir3_register *reg) +{ + return reg->num & 0x3; +} + +#define INVALID_REG regid(63, 0) +#define VALIDREG(r) ((r) != INVALID_REG) +#define CONDREG(r, val) COND(VALIDREG(r), (val)) + +static inline bool is_flow(struct ir3_instruction *instr) +{ + return (opc_cat(instr->opc) == 0); +} + +static inline bool is_kill(struct ir3_instruction *instr) +{ + return instr->opc == OPC_KILL; +} + +static inline bool is_nop(struct ir3_instruction *instr) +{ + return instr->opc == OPC_NOP; +} + +static inline bool is_same_type_reg(struct ir3_register *reg1, + struct ir3_register *reg2) +{ + unsigned type_reg1 = (reg1->flags & (IR3_REG_HIGH | IR3_REG_HALF)); + unsigned type_reg2 = (reg2->flags & (IR3_REG_HIGH | IR3_REG_HALF)); + + if (type_reg1 ^ type_reg2) + return false; + else + return true; +} + +/* Is it a non-transformative (ie. not type changing) mov? This can + * also include absneg.s/absneg.f, which for the most part can be + * treated as a mov (single src argument). 
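+ * (e.g. an absneg.f with neither the abs nor the neg src flag set is
+ * effectively "mov.f32f32 dst, src", which is presumably why it is
+ * accepted below.)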
+ */
+static inline bool is_same_type_mov(struct ir3_instruction *instr)
+{
+	struct ir3_register *dst;
+
+	switch (instr->opc) {
+	case OPC_MOV:
+		if (instr->cat1.src_type != instr->cat1.dst_type)
+			return false;
+		/* If the type of dest reg and src reg are different,
+		 * it shouldn't be considered a same-type mov:
+		 */
+		if (!is_same_type_reg(instr->regs[0], instr->regs[1]))
+			return false;
+		break;
+	case OPC_ABSNEG_F:
+	case OPC_ABSNEG_S:
+		if (instr->flags & IR3_INSTR_SAT)
+			return false;
+		/* If the type of dest reg and src reg are different,
+		 * it shouldn't be considered a same-type mov:
+		 */
+		if (!is_same_type_reg(instr->regs[0], instr->regs[1]))
+			return false;
+		break;
+	default:
+		return false;
+	}
+
+	dst = instr->regs[0];
+
+	/* mov's that write to a0 or p0.x are special: */
+	if (dst->num == regid(REG_P0, 0))
+		return false;
+	if (reg_num(dst) == REG_A0)
+		return false;
+
+	if (dst->flags & (IR3_REG_RELATIV | IR3_REG_ARRAY))
+		return false;
+
+	return true;
+}
+
+/* A move from const, which changes size but not type, can also be
+ * folded into dest instruction in some cases.
+ */
+static inline bool is_const_mov(struct ir3_instruction *instr)
+{
+	if (instr->opc != OPC_MOV)
+		return false;
+
+	if (!(instr->regs[1]->flags & IR3_REG_CONST))
+		return false;
+
+	type_t src_type = instr->cat1.src_type;
+	type_t dst_type = instr->cat1.dst_type;
+
+	return (type_float(src_type) && type_float(dst_type)) ||
+		(type_uint(src_type) && type_uint(dst_type)) ||
+		(type_sint(src_type) && type_sint(dst_type));
+}
+
+static inline bool is_alu(struct ir3_instruction *instr)
+{
+	return (1 <= opc_cat(instr->opc)) && (opc_cat(instr->opc) <= 3);
+}
+
+static inline bool is_sfu(struct ir3_instruction *instr)
+{
+	return (opc_cat(instr->opc) == 4);
+}
+
+static inline bool is_tex(struct ir3_instruction *instr)
+{
+	return (opc_cat(instr->opc) == 5);
+}
+
+static inline bool is_tex_or_prefetch(struct ir3_instruction *instr)
+{
+	return is_tex(instr) || (instr->opc == OPC_META_TEX_PREFETCH);
+}
+
+static inline bool is_mem(struct ir3_instruction *instr)
+{
+	return (opc_cat(instr->opc) == 6);
+}
+
+static inline bool is_barrier(struct ir3_instruction *instr)
+{
+	return (opc_cat(instr->opc) == 7);
+}
+
+static inline bool
+is_half(struct ir3_instruction *instr)
+{
+	return !!(instr->regs[0]->flags & IR3_REG_HALF);
+}
+
+static inline bool
+is_high(struct ir3_instruction *instr)
+{
+	return !!(instr->regs[0]->flags & IR3_REG_HIGH);
+}
+
+static inline bool
+is_store(struct ir3_instruction *instr)
+{
+	/* For these instructions, the "destination" register is
+	 * actually a source, the address to store to.
+	 */
+	switch (instr->opc) {
+	case OPC_STG:
+	case OPC_STGB:
+	case OPC_STIB:
+	case OPC_STP:
+	case OPC_STL:
+	case OPC_STLW:
+	case OPC_L2G:
+	case OPC_G2L:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static inline bool is_load(struct ir3_instruction *instr)
+{
+	switch (instr->opc) {
+	case OPC_LDG:
+	case OPC_LDGB:
+	case OPC_LDIB:
+	case OPC_LDL:
+	case OPC_LDP:
+	case OPC_L2G:
+	case OPC_LDLW:
+	case OPC_LDC:
+	case OPC_LDLV:
+		/* probably some others too.. */
+		return true;
+	default:
+		return false;
+	}
+}
+
+static inline bool is_input(struct ir3_instruction *instr)
+{
+	/* in some cases, ldlv is used to fetch varying without
+	 * interpolation..
fortunately inloc is the first src + * register in either case + */ + switch (instr->opc) { + case OPC_LDLV: + case OPC_BARY_F: + return true; + default: + return false; + } +} + +static inline bool is_bool(struct ir3_instruction *instr) +{ + switch (instr->opc) { + case OPC_CMPS_F: + case OPC_CMPS_S: + case OPC_CMPS_U: + return true; + default: + return false; + } +} + +static inline bool is_meta(struct ir3_instruction *instr) +{ + return (opc_cat(instr->opc) == -1); +} + +static inline unsigned dest_regs(struct ir3_instruction *instr) +{ + if ((instr->regs_count == 0) || is_store(instr) || is_flow(instr)) + return 0; + + return util_last_bit(instr->regs[0]->wrmask); +} + +static inline bool +writes_gpr(struct ir3_instruction *instr) +{ + if (dest_regs(instr) == 0) + return false; + /* is dest a normal temp register: */ + struct ir3_register *reg = instr->regs[0]; + debug_assert(!(reg->flags & (IR3_REG_CONST | IR3_REG_IMMED))); + if ((reg_num(reg) == REG_A0) || + (reg->num == regid(REG_P0, 0))) + return false; + return true; +} + +static inline bool writes_addr0(struct ir3_instruction *instr) +{ + if (instr->regs_count > 0) { + struct ir3_register *dst = instr->regs[0]; + return dst->num == regid(REG_A0, 0); + } + return false; +} + +static inline bool writes_addr1(struct ir3_instruction *instr) +{ + if (instr->regs_count > 0) { + struct ir3_register *dst = instr->regs[0]; + return dst->num == regid(REG_A0, 1); + } + return false; +} + +static inline bool writes_pred(struct ir3_instruction *instr) +{ + if (instr->regs_count > 0) { + struct ir3_register *dst = instr->regs[0]; + return reg_num(dst) == REG_P0; + } + return false; +} + +/* returns defining instruction for reg */ +/* TODO better name */ +static inline struct ir3_instruction *ssa(struct ir3_register *reg) +{ + if (reg->flags & (IR3_REG_SSA | IR3_REG_ARRAY)) { + return reg->instr; + } + return NULL; +} + +static inline bool conflicts(struct ir3_instruction *a, + struct ir3_instruction *b) +{ + return (a && b) && (a != b); +} + +static inline bool reg_gpr(struct ir3_register *r) +{ + if (r->flags & (IR3_REG_CONST | IR3_REG_IMMED)) + return false; + if ((reg_num(r) == REG_A0) || (reg_num(r) == REG_P0)) + return false; + return true; +} + +static inline type_t half_type(type_t type) +{ + switch (type) { + case TYPE_F32: return TYPE_F16; + case TYPE_U32: return TYPE_U16; + case TYPE_S32: return TYPE_S16; + case TYPE_F16: + case TYPE_U16: + case TYPE_S16: + return type; + default: + assert(0); + return ~0; + } +} + +/* some cat2 instructions (ie. 
those which are not float) can embed an + * immediate: + */ +static inline bool ir3_cat2_int(opc_t opc) +{ + switch (opc) { + case OPC_ADD_U: + case OPC_ADD_S: + case OPC_SUB_U: + case OPC_SUB_S: + case OPC_CMPS_U: + case OPC_CMPS_S: + case OPC_MIN_U: + case OPC_MIN_S: + case OPC_MAX_U: + case OPC_MAX_S: + case OPC_CMPV_U: + case OPC_CMPV_S: + case OPC_MUL_U24: + case OPC_MUL_S24: + case OPC_MULL_U: + case OPC_CLZ_S: + case OPC_ABSNEG_S: + case OPC_AND_B: + case OPC_OR_B: + case OPC_NOT_B: + case OPC_XOR_B: + case OPC_BFREV_B: + case OPC_CLZ_B: + case OPC_SHL_B: + case OPC_SHR_B: + case OPC_ASHR_B: + case OPC_MGEN_B: + case OPC_GETBIT_B: + case OPC_CBITS_B: + case OPC_BARY_F: + return true; + + default: + return false; + } +} + +/* map cat2 instruction to valid abs/neg flags: */ +static inline unsigned ir3_cat2_absneg(opc_t opc) +{ + switch (opc) { + case OPC_ADD_F: + case OPC_MIN_F: + case OPC_MAX_F: + case OPC_MUL_F: + case OPC_SIGN_F: + case OPC_CMPS_F: + case OPC_ABSNEG_F: + case OPC_CMPV_F: + case OPC_FLOOR_F: + case OPC_CEIL_F: + case OPC_RNDNE_F: + case OPC_RNDAZ_F: + case OPC_TRUNC_F: + case OPC_BARY_F: + return IR3_REG_FABS | IR3_REG_FNEG; + + case OPC_ADD_U: + case OPC_ADD_S: + case OPC_SUB_U: + case OPC_SUB_S: + case OPC_CMPS_U: + case OPC_CMPS_S: + case OPC_MIN_U: + case OPC_MIN_S: + case OPC_MAX_U: + case OPC_MAX_S: + case OPC_CMPV_U: + case OPC_CMPV_S: + case OPC_MUL_U24: + case OPC_MUL_S24: + case OPC_MULL_U: + case OPC_CLZ_S: + return 0; + + case OPC_ABSNEG_S: + return IR3_REG_SABS | IR3_REG_SNEG; + + case OPC_AND_B: + case OPC_OR_B: + case OPC_NOT_B: + case OPC_XOR_B: + case OPC_BFREV_B: + case OPC_CLZ_B: + case OPC_SHL_B: + case OPC_SHR_B: + case OPC_ASHR_B: + case OPC_MGEN_B: + case OPC_GETBIT_B: + case OPC_CBITS_B: + return IR3_REG_BNOT; + + default: + return 0; + } +} + +/* map cat3 instructions to valid abs/neg flags: */ +static inline unsigned ir3_cat3_absneg(opc_t opc) +{ + switch (opc) { + case OPC_MAD_F16: + case OPC_MAD_F32: + case OPC_SEL_F16: + case OPC_SEL_F32: + return IR3_REG_FNEG; + + case OPC_MAD_U16: + case OPC_MADSH_U16: + case OPC_MAD_S16: + case OPC_MADSH_M16: + case OPC_MAD_U24: + case OPC_MAD_S24: + case OPC_SEL_S16: + case OPC_SEL_S32: + case OPC_SAD_S16: + case OPC_SAD_S32: + /* neg *may* work on 3rd src.. 
 */
+
+	case OPC_SEL_B16:
+	case OPC_SEL_B32:
+
+	default:
+		return 0;
+	}
+}
+
+#define MASK(n) ((1 << (n)) - 1)
+
+/* iterator for an instruction's sources (reg), also returns src #: */
+#define foreach_src_n(__srcreg, __n, __instr) \
+	if ((__instr)->regs_count) \
+		for (unsigned __cnt = (__instr)->regs_count - 1, __n = 0; __n < __cnt; __n++) \
+			if ((__srcreg = (__instr)->regs[__n + 1]))
+
+/* iterator for an instruction's sources (reg): */
+#define foreach_src(__srcreg, __instr) \
+	foreach_src_n(__srcreg, __i, __instr)
+
+static inline unsigned __ssa_src_cnt(struct ir3_instruction *instr)
+{
+	unsigned cnt = instr->regs_count + instr->deps_count;
+	if (instr->address)
+		cnt++;
+	return cnt;
+}
+
+static inline struct ir3_instruction **
+__ssa_srcp_n(struct ir3_instruction *instr, unsigned n)
+{
+	if (n == (instr->regs_count + instr->deps_count))
+		return &instr->address;
+	if (n >= instr->regs_count)
+		return &instr->deps[n - instr->regs_count];
+	if (ssa(instr->regs[n]))
+		return &instr->regs[n]->instr;
+	return NULL;
+}
+
+static inline bool __is_false_dep(struct ir3_instruction *instr, unsigned n)
+{
+	if (n == (instr->regs_count + instr->deps_count))
+		return false;
+	if (n >= instr->regs_count)
+		return true;
+	return false;
+}
+
+#define foreach_ssa_srcp_n(__srcp, __n, __instr) \
+	for (struct ir3_instruction **__srcp = (void *)~0; __srcp; __srcp = NULL) \
+		for (unsigned __cnt = __ssa_src_cnt(__instr), __n = 0; __n < __cnt; __n++) \
+			if ((__srcp = __ssa_srcp_n(__instr, __n)))
+
+#define foreach_ssa_srcp(__srcp, __instr) \
+	foreach_ssa_srcp_n(__srcp, __i, __instr)
+
+/* iterator for an instruction's SSA sources (instr), also returns src #: */
+#define foreach_ssa_src_n(__srcinst, __n, __instr) \
+	foreach_ssa_srcp_n(__srcp, __n, __instr) \
+		if ((__srcinst = *__srcp))
+
+/* iterator for an instruction's SSA sources (instr): */
+#define foreach_ssa_src(__srcinst, __instr) \
+	foreach_ssa_src_n(__srcinst, __i, __instr)
+
+/* iterators for shader inputs: */
+#define foreach_input_n(__ininstr, __cnt, __ir) \
+	for (unsigned __cnt = 0; __cnt < (__ir)->inputs_count; __cnt++) \
+		if ((__ininstr = (__ir)->inputs[__cnt]))
+#define foreach_input(__ininstr, __ir) \
+	foreach_input_n(__ininstr, __i, __ir)
+
+/* iterators for shader outputs: */
+#define foreach_output_n(__outinstr, __cnt, __ir) \
+	for (unsigned __cnt = 0; __cnt < (__ir)->outputs_count; __cnt++) \
+		if ((__outinstr = (__ir)->outputs[__cnt]))
+#define foreach_output(__outinstr, __ir) \
+	foreach_output_n(__outinstr, __i, __ir)
+
+/* iterators for instructions: */
+#define foreach_instr(__instr, __list) \
+	list_for_each_entry(struct ir3_instruction, __instr, __list, node)
+#define foreach_instr_rev(__instr, __list) \
+	list_for_each_entry_rev(struct ir3_instruction, __instr, __list, node)
+#define foreach_instr_safe(__instr, __list) \
+	list_for_each_entry_safe(struct ir3_instruction, __instr, __list, node)
+
+/* iterators for blocks: */
+#define foreach_block(__block, __list) \
+	list_for_each_entry(struct ir3_block, __block, __list, node)
+#define foreach_block_safe(__block, __list) \
+	list_for_each_entry_safe(struct ir3_block, __block, __list, node)
+#define foreach_block_rev(__block, __list) \
+	list_for_each_entry_rev(struct ir3_block, __block, __list, node)
+
+/* iterators for arrays: */
+#define foreach_array(__array, __list) \
+	list_for_each_entry(struct ir3_array, __array, __list, node)
+
+/* Check if condition is true for any src instruction.
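+ * (Hypothetical usage sketch: check_src_cond(instr, is_sfu) reports
+ * whether any source of instr -- looking through meta:split/collect --
+ * is a cat4 instruction.)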
+ */ +static inline bool +check_src_cond(struct ir3_instruction *instr, bool (*cond)(struct ir3_instruction *)) +{ + struct ir3_register *reg; + + /* Note that this is also used post-RA so skip the ssa iterator: */ + foreach_src (reg, instr) { + struct ir3_instruction *src = reg->instr; + + if (!src) + continue; + + /* meta:split/collect aren't real instructions, the thing that + * we actually care about is *their* srcs + */ + if ((src->opc == OPC_META_SPLIT) || (src->opc == OPC_META_COLLECT)) { + if (check_src_cond(src, cond)) + return true; + } else { + if (cond(src)) + return true; + } + } + + return false; +} + +/* dump: */ +void ir3_print(struct ir3 *ir); +void ir3_print_instr(struct ir3_instruction *instr); + +/* delay calculation: */ +int ir3_delayslots(struct ir3_instruction *assigner, + struct ir3_instruction *consumer, unsigned n, bool soft); +unsigned ir3_delay_calc(struct ir3_block *block, struct ir3_instruction *instr, + bool soft, bool pred); +void ir3_remove_nops(struct ir3 *ir); + +/* dead code elimination: */ +struct ir3_shader_variant; +void ir3_dce(struct ir3 *ir, struct ir3_shader_variant *so); + +/* fp16 conversion folding */ +void ir3_cf(struct ir3 *ir); + +/* copy-propagate: */ +void ir3_cp(struct ir3 *ir, struct ir3_shader_variant *so); + +/* group neighbors and insert mov's to resolve conflicts: */ +void ir3_group(struct ir3 *ir); + +/* Sethi–Ullman numbering: */ +void ir3_sun(struct ir3 *ir); + +/* scheduling: */ +void ir3_sched_add_deps(struct ir3 *ir); +int ir3_sched(struct ir3 *ir); + +struct ir3_context; +int ir3_postsched(struct ir3_context *ctx); + +bool ir3_a6xx_fixup_atomic_dests(struct ir3 *ir, struct ir3_shader_variant *so); + +/* register assignment: */ +struct ir3_ra_reg_set * ir3_ra_alloc_reg_set(struct ir3_compiler *compiler); +int ir3_ra(struct ir3_shader_variant *v, struct ir3_instruction **precolor, unsigned nprecolor); + +/* legalize: */ +void ir3_legalize(struct ir3 *ir, struct ir3_shader_variant *so, int *max_bary); + +static inline bool +ir3_has_latency_to_hide(struct ir3 *ir) +{ + /* VS/GS/TCS/TESS co-exist with frag shader invocations, but we don't + * know the nature of the fragment shader. Just assume it will have + * latency to hide: + */ + if (ir->type != MESA_SHADER_FRAGMENT) + return true; + + foreach_block (block, &ir->block_list) { + foreach_instr (instr, &block->instr_list) { + if (is_tex_or_prefetch(instr)) + return true; + + if (is_load(instr)) { + switch (instr->opc) { + case OPC_LDLV: + case OPC_LDL: + case OPC_LDLW: + break; + default: + return true; + } + } + } + } + + return false; +} + +/* ************************************************************************* */ +/* instruction helpers */ + +/* creates SSA src of correct type (ie. half vs full precision) */ +static inline struct ir3_register * __ssa_src(struct ir3_instruction *instr, + struct ir3_instruction *src, unsigned flags) +{ + struct ir3_register *reg; + if (src->regs[0]->flags & IR3_REG_HALF) + flags |= IR3_REG_HALF; + reg = ir3_reg_create(instr, 0, IR3_REG_SSA | flags); + reg->instr = src; + reg->wrmask = src->regs[0]->wrmask; + return reg; +} + +static inline struct ir3_register * __ssa_dst(struct ir3_instruction *instr) +{ + struct ir3_register *reg = ir3_reg_create(instr, 0, 0); + reg->flags |= IR3_REG_SSA; + return reg; +} + +static inline struct ir3_instruction * +create_immed_typed(struct ir3_block *block, uint32_t val, type_t type) +{ + struct ir3_instruction *mov; + unsigned flags = (type_size(type) < 32) ? 
IR3_REG_HALF : 0; + + mov = ir3_instr_create(block, OPC_MOV); + mov->cat1.src_type = type; + mov->cat1.dst_type = type; + __ssa_dst(mov)->flags |= flags; + ir3_reg_create(mov, 0, IR3_REG_IMMED | flags)->uim_val = val; + + return mov; +} + +static inline struct ir3_instruction * +create_immed(struct ir3_block *block, uint32_t val) +{ + return create_immed_typed(block, val, TYPE_U32); +} + +static inline struct ir3_instruction * +create_uniform_typed(struct ir3_block *block, unsigned n, type_t type) +{ + struct ir3_instruction *mov; + unsigned flags = (type_size(type) < 32) ? IR3_REG_HALF : 0; + + mov = ir3_instr_create(block, OPC_MOV); + mov->cat1.src_type = type; + mov->cat1.dst_type = type; + __ssa_dst(mov)->flags |= flags; + ir3_reg_create(mov, n, IR3_REG_CONST | flags); + + return mov; +} + +static inline struct ir3_instruction * +create_uniform(struct ir3_block *block, unsigned n) +{ + return create_uniform_typed(block, n, TYPE_F32); +} + +static inline struct ir3_instruction * +create_uniform_indirect(struct ir3_block *block, int n, + struct ir3_instruction *address) +{ + struct ir3_instruction *mov; + + mov = ir3_instr_create(block, OPC_MOV); + mov->cat1.src_type = TYPE_U32; + mov->cat1.dst_type = TYPE_U32; + __ssa_dst(mov); + ir3_reg_create(mov, 0, IR3_REG_CONST | IR3_REG_RELATIV)->array.offset = n; + + ir3_instr_set_address(mov, address); + + return mov; +} + +static inline struct ir3_instruction * +ir3_MOV(struct ir3_block *block, struct ir3_instruction *src, type_t type) +{ + struct ir3_instruction *instr = ir3_instr_create(block, OPC_MOV); + __ssa_dst(instr); + if (src->regs[0]->flags & IR3_REG_ARRAY) { + struct ir3_register *src_reg = __ssa_src(instr, src, IR3_REG_ARRAY); + src_reg->array = src->regs[0]->array; + } else { + __ssa_src(instr, src, src->regs[0]->flags & IR3_REG_HIGH); + } + debug_assert(!(src->regs[0]->flags & IR3_REG_RELATIV)); + instr->cat1.src_type = type; + instr->cat1.dst_type = type; + return instr; +} + +static inline struct ir3_instruction * +ir3_COV(struct ir3_block *block, struct ir3_instruction *src, + type_t src_type, type_t dst_type) +{ + struct ir3_instruction *instr = ir3_instr_create(block, OPC_MOV); + unsigned dst_flags = (type_size(dst_type) < 32) ? IR3_REG_HALF : 0; + unsigned src_flags = (type_size(src_type) < 32) ? 
IR3_REG_HALF : 0; + + debug_assert((src->regs[0]->flags & IR3_REG_HALF) == src_flags); + + __ssa_dst(instr)->flags |= dst_flags; + __ssa_src(instr, src, 0); + instr->cat1.src_type = src_type; + instr->cat1.dst_type = dst_type; + debug_assert(!(src->regs[0]->flags & IR3_REG_ARRAY)); + return instr; +} + +static inline struct ir3_instruction * +ir3_NOP(struct ir3_block *block) +{ + return ir3_instr_create(block, OPC_NOP); +} + +#define IR3_INSTR_0 0 + +#define __INSTR0(flag, name, opc) \ +static inline struct ir3_instruction * \ +ir3_##name(struct ir3_block *block) \ +{ \ + struct ir3_instruction *instr = \ + ir3_instr_create(block, opc); \ + instr->flags |= flag; \ + return instr; \ +} +#define INSTR0F(f, name) __INSTR0(IR3_INSTR_##f, name##_##f, OPC_##name) +#define INSTR0(name) __INSTR0(0, name, OPC_##name) + +#define __INSTR1(flag, name, opc) \ +static inline struct ir3_instruction * \ +ir3_##name(struct ir3_block *block, \ + struct ir3_instruction *a, unsigned aflags) \ +{ \ + struct ir3_instruction *instr = \ + ir3_instr_create(block, opc); \ + __ssa_dst(instr); \ + __ssa_src(instr, a, aflags); \ + instr->flags |= flag; \ + return instr; \ +} +#define INSTR1F(f, name) __INSTR1(IR3_INSTR_##f, name##_##f, OPC_##name) +#define INSTR1(name) __INSTR1(0, name, OPC_##name) + +#define __INSTR2(flag, name, opc) \ +static inline struct ir3_instruction * \ +ir3_##name(struct ir3_block *block, \ + struct ir3_instruction *a, unsigned aflags, \ + struct ir3_instruction *b, unsigned bflags) \ +{ \ + struct ir3_instruction *instr = \ + ir3_instr_create(block, opc); \ + __ssa_dst(instr); \ + __ssa_src(instr, a, aflags); \ + __ssa_src(instr, b, bflags); \ + instr->flags |= flag; \ + return instr; \ +} +#define INSTR2F(f, name) __INSTR2(IR3_INSTR_##f, name##_##f, OPC_##name) +#define INSTR2(name) __INSTR2(0, name, OPC_##name) + +#define __INSTR3(flag, name, opc) \ +static inline struct ir3_instruction * \ +ir3_##name(struct ir3_block *block, \ + struct ir3_instruction *a, unsigned aflags, \ + struct ir3_instruction *b, unsigned bflags, \ + struct ir3_instruction *c, unsigned cflags) \ +{ \ + struct ir3_instruction *instr = \ + ir3_instr_create2(block, opc, 4); \ + __ssa_dst(instr); \ + __ssa_src(instr, a, aflags); \ + __ssa_src(instr, b, bflags); \ + __ssa_src(instr, c, cflags); \ + instr->flags |= flag; \ + return instr; \ +} +#define INSTR3F(f, name) __INSTR3(IR3_INSTR_##f, name##_##f, OPC_##name) +#define INSTR3(name) __INSTR3(0, name, OPC_##name) + +#define __INSTR4(flag, name, opc) \ +static inline struct ir3_instruction * \ +ir3_##name(struct ir3_block *block, \ + struct ir3_instruction *a, unsigned aflags, \ + struct ir3_instruction *b, unsigned bflags, \ + struct ir3_instruction *c, unsigned cflags, \ + struct ir3_instruction *d, unsigned dflags) \ +{ \ + struct ir3_instruction *instr = \ + ir3_instr_create2(block, opc, 5); \ + __ssa_dst(instr); \ + __ssa_src(instr, a, aflags); \ + __ssa_src(instr, b, bflags); \ + __ssa_src(instr, c, cflags); \ + __ssa_src(instr, d, dflags); \ + instr->flags |= flag; \ + return instr; \ +} +#define INSTR4F(f, name) __INSTR4(IR3_INSTR_##f, name##_##f, OPC_##name) +#define INSTR4(name) __INSTR4(0, name, OPC_##name) + +/* cat0 instructions: */ +INSTR1(B) +INSTR0(JUMP) +INSTR1(KILL) +INSTR0(END) +INSTR0(CHSH) +INSTR0(CHMASK) +INSTR1(PREDT) +INSTR0(PREDF) +INSTR0(PREDE) + +/* cat2 instructions, most 2 src but some 1 src: */ +INSTR2(ADD_F) +INSTR2(MIN_F) +INSTR2(MAX_F) +INSTR2(MUL_F) +INSTR1(SIGN_F) +INSTR2(CMPS_F) +INSTR1(ABSNEG_F) +INSTR2(CMPV_F) +INSTR1(FLOOR_F) 
+INSTR1(CEIL_F) +INSTR1(RNDNE_F) +INSTR1(RNDAZ_F) +INSTR1(TRUNC_F) +INSTR2(ADD_U) +INSTR2(ADD_S) +INSTR2(SUB_U) +INSTR2(SUB_S) +INSTR2(CMPS_U) +INSTR2(CMPS_S) +INSTR2(MIN_U) +INSTR2(MIN_S) +INSTR2(MAX_U) +INSTR2(MAX_S) +INSTR1(ABSNEG_S) +INSTR2(AND_B) +INSTR2(OR_B) +INSTR1(NOT_B) +INSTR2(XOR_B) +INSTR2(CMPV_U) +INSTR2(CMPV_S) +INSTR2(MUL_U24) +INSTR2(MUL_S24) +INSTR2(MULL_U) +INSTR1(BFREV_B) +INSTR1(CLZ_S) +INSTR1(CLZ_B) +INSTR2(SHL_B) +INSTR2(SHR_B) +INSTR2(ASHR_B) +INSTR2(BARY_F) +INSTR2(MGEN_B) +INSTR2(GETBIT_B) +INSTR1(SETRM) +INSTR1(CBITS_B) +INSTR2(SHB) +INSTR2(MSAD) + +/* cat3 instructions: */ +INSTR3(MAD_U16) +INSTR3(MADSH_U16) +INSTR3(MAD_S16) +INSTR3(MADSH_M16) +INSTR3(MAD_U24) +INSTR3(MAD_S24) +INSTR3(MAD_F16) +INSTR3(MAD_F32) +/* NOTE: SEL_B32 checks for zero vs nonzero */ +INSTR3(SEL_B16) +INSTR3(SEL_B32) +INSTR3(SEL_S16) +INSTR3(SEL_S32) +INSTR3(SEL_F16) +INSTR3(SEL_F32) +INSTR3(SAD_S16) +INSTR3(SAD_S32) + +/* cat4 instructions: */ +INSTR1(RCP) +INSTR1(RSQ) +INSTR1(HRSQ) +INSTR1(LOG2) +INSTR1(HLOG2) +INSTR1(EXP2) +INSTR1(HEXP2) +INSTR1(SIN) +INSTR1(COS) +INSTR1(SQRT) + +/* cat5 instructions: */ +INSTR1(DSX) +INSTR1(DSXPP_1) +INSTR1(DSY) +INSTR1(DSYPP_1) +INSTR1F(3D, DSX) +INSTR1F(3D, DSY) +INSTR1(RGETPOS) + +static inline struct ir3_instruction * +ir3_SAM(struct ir3_block *block, opc_t opc, type_t type, + unsigned wrmask, unsigned flags, struct ir3_instruction *samp_tex, + struct ir3_instruction *src0, struct ir3_instruction *src1) +{ + struct ir3_instruction *sam; + + sam = ir3_instr_create(block, opc); + sam->flags |= flags; + __ssa_dst(sam)->wrmask = wrmask; + if (flags & IR3_INSTR_S2EN) { + __ssa_src(sam, samp_tex, IR3_REG_HALF); + } + if (src0) { + __ssa_src(sam, src0, 0); + } + if (src1) { + __ssa_src(sam, src1, 0); + } + sam->cat5.type = type; + + return sam; +} + +/* cat6 instructions: */ +INSTR2(LDLV) +INSTR3(LDG) +INSTR3(LDL) +INSTR3(LDLW) +INSTR3(STG) +INSTR3(STL) +INSTR3(STLW) +INSTR1(RESINFO) +INSTR1(RESFMT) +INSTR2(ATOMIC_ADD) +INSTR2(ATOMIC_SUB) +INSTR2(ATOMIC_XCHG) +INSTR2(ATOMIC_INC) +INSTR2(ATOMIC_DEC) +INSTR2(ATOMIC_CMPXCHG) +INSTR2(ATOMIC_MIN) +INSTR2(ATOMIC_MAX) +INSTR2(ATOMIC_AND) +INSTR2(ATOMIC_OR) +INSTR2(ATOMIC_XOR) +INSTR2(LDC) +#if GPU >= 600 +INSTR3(STIB); +INSTR2(LDIB); +INSTR3F(G, ATOMIC_ADD) +INSTR3F(G, ATOMIC_SUB) +INSTR3F(G, ATOMIC_XCHG) +INSTR3F(G, ATOMIC_INC) +INSTR3F(G, ATOMIC_DEC) +INSTR3F(G, ATOMIC_CMPXCHG) +INSTR3F(G, ATOMIC_MIN) +INSTR3F(G, ATOMIC_MAX) +INSTR3F(G, ATOMIC_AND) +INSTR3F(G, ATOMIC_OR) +INSTR3F(G, ATOMIC_XOR) +#elif GPU >= 400 +INSTR3(LDGB) +INSTR4(STGB) +INSTR4(STIB) +INSTR4F(G, ATOMIC_ADD) +INSTR4F(G, ATOMIC_SUB) +INSTR4F(G, ATOMIC_XCHG) +INSTR4F(G, ATOMIC_INC) +INSTR4F(G, ATOMIC_DEC) +INSTR4F(G, ATOMIC_CMPXCHG) +INSTR4F(G, ATOMIC_MIN) +INSTR4F(G, ATOMIC_MAX) +INSTR4F(G, ATOMIC_AND) +INSTR4F(G, ATOMIC_OR) +INSTR4F(G, ATOMIC_XOR) +#endif + +INSTR4F(G, STG) + +/* cat7 instructions: */ +INSTR0(BAR) +INSTR0(FENCE) + +/* meta instructions: */ +INSTR0(META_TEX_PREFETCH); + +/* ************************************************************************* */ +/* split this out or find some helper to use.. like main/bitset.h.. 
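+ * (regmask_t below is a simple register-coverage bitset; a hypothetical
+ * hazard check could regmask_set() every dst an earlier instruction
+ * writes and then regmask_get() each src of a later one.)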
 */
+
+#include <string.h>
+#include "util/bitset.h"
+
+#define MAX_REG 256
+
+typedef BITSET_DECLARE(regmask_t, 2 * MAX_REG);
+
+static inline bool
+__regmask_get(regmask_t *regmask, struct ir3_register *reg, unsigned n)
+{
+	if (reg->merged) {
+		/* a6xx+ case, with merged register file, we track things in terms
+		 * of half-precision registers, with a full-precision register
+		 * using two half-precision slots:
+		 */
+		if (reg->flags & IR3_REG_HALF) {
+			return BITSET_TEST(*regmask, n);
+		} else {
+			n *= 2;
+			return BITSET_TEST(*regmask, n) || BITSET_TEST(*regmask, n+1);
+		}
+	} else {
+		/* pre a6xx case, with separate register file for half and full
+		 * precision:
+		 */
+		if (reg->flags & IR3_REG_HALF)
+			n += MAX_REG;
+		return BITSET_TEST(*regmask, n);
+	}
+}
+
+static inline void
+__regmask_set(regmask_t *regmask, struct ir3_register *reg, unsigned n)
+{
+	if (reg->merged) {
+		/* a6xx+ case, with merged register file, we track things in terms
+		 * of half-precision registers, with a full-precision register
+		 * using two half-precision slots:
+		 */
+		if (reg->flags & IR3_REG_HALF) {
+			BITSET_SET(*regmask, n);
+		} else {
+			n *= 2;
+			BITSET_SET(*regmask, n);
+			BITSET_SET(*regmask, n+1);
+		}
+	} else {
+		/* pre a6xx case, with separate register file for half and full
+		 * precision:
+		 */
+		if (reg->flags & IR3_REG_HALF)
+			n += MAX_REG;
+		BITSET_SET(*regmask, n);
+	}
+}
+
+static inline void regmask_init(regmask_t *regmask)
+{
+	memset(regmask, 0, sizeof(*regmask));
+}
+
+static inline void regmask_set(regmask_t *regmask, struct ir3_register *reg)
+{
+	if (reg->flags & IR3_REG_RELATIV) {
+		for (unsigned i = 0; i < reg->size; i++)
+			__regmask_set(regmask, reg, reg->array.offset + i);
+	} else {
+		for (unsigned mask = reg->wrmask, n = reg->num; mask; mask >>= 1, n++)
+			if (mask & 1)
+				__regmask_set(regmask, reg, n);
+	}
+}
+
+static inline void regmask_or(regmask_t *dst, regmask_t *a, regmask_t *b)
+{
+	unsigned i;
+	for (i = 0; i < ARRAY_SIZE(*dst); i++)
+		(*dst)[i] = (*a)[i] | (*b)[i];
+}
+
+static inline bool regmask_get(regmask_t *regmask,
+		struct ir3_register *reg)
+{
+	if (reg->flags & IR3_REG_RELATIV) {
+		for (unsigned i = 0; i < reg->size; i++)
+			if (__regmask_get(regmask, reg, reg->array.offset + i))
+				return true;
+	} else {
+		for (unsigned mask = reg->wrmask, n = reg->num; mask; mask >>= 1, n++)
+			if (mask & 1)
+				if (__regmask_get(regmask, reg, n))
+					return true;
+	}
+	return false;
+}
+
+/* ************************************************************************* */
+
+#endif /* IR3_H_ */
diff --git a/selfdrive/modeld/thneed/debug/decompiler/shader_enums.h b/selfdrive/modeld/thneed/debug/decompiler/shader_enums.h
new file mode 100644
index 0000000000..b33a91727a
--- /dev/null
+++ b/selfdrive/modeld/thneed/debug/decompiler/shader_enums.h
@@ -0,0 +1,906 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 1999-2008 Brian Paul All Rights Reserved.
+ * Copyright (C) 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef SHADER_ENUMS_H
+#define SHADER_ENUMS_H
+
+#include <stdbool.h>
+
+/* Project-wide (GL and Vulkan) maximum. */
+#define MAX_DRAW_BUFFERS 8
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Shader stages.
+ *
+ * The order must match how shaders are ordered in the pipeline.
+ * The GLSL linker assumes that if i<j, then the i-th shader is
+ * executed before the j-th shader.
+ */
+
+  /**
+   * From the GL_ARB_shader_ballot spec:
+   *
+   *   "The variable <gl_SubGroupSizeARB> is the maximum number of
+   *    invocations in a sub-group. The maximum <gl_SubGroupSizeARB>
+   *    supported in this extension is 64."
+   *
+   * The spec defines this as a uniform. However, it's highly unlikely that
+   * implementations actually treat it as a uniform (which is loaded from a
+   * constant buffer). Most likely, this is an implementation-wide constant,
+   * or perhaps something that depends on the shader stage.
+   */
+  SYSTEM_VALUE_SUBGROUP_SIZE,
+
+  /**
+   * From the GL_ARB_shader_ballot spec:
+   *
+   *   "The variable <gl_SubGroupInvocationARB> holds the index of the
+   *    invocation within sub-group. This variable is in the range 0 to
+   *    <gl_SubGroupSizeARB>-1, where <gl_SubGroupSizeARB> is the total
+   *    number of invocations in a sub-group."
+   */
+  SYSTEM_VALUE_SUBGROUP_INVOCATION,
+
+  /**
+   * From the GL_ARB_shader_ballot spec:
+   *
+   *   "The variables <gl_SubGroup??MaskARB> provide a bitmask for all
+   *    invocations, with one bit per invocation starting with the least
+   *    significant bit, according to the following table,
+   *
+   *      variable               equation for bit values
+   *      --------------------   ------------------------------------
+   *      gl_SubGroupEqMaskARB   bit index == gl_SubGroupInvocationARB
+   *      gl_SubGroupGeMaskARB   bit index >= gl_SubGroupInvocationARB
+   *      gl_SubGroupGtMaskARB   bit index >  gl_SubGroupInvocationARB
+   *      gl_SubGroupLeMaskARB   bit index <= gl_SubGroupInvocationARB
+   *      gl_SubGroupLtMaskARB   bit index <  gl_SubGroupInvocationARB
+   */
+  SYSTEM_VALUE_SUBGROUP_EQ_MASK,
+  SYSTEM_VALUE_SUBGROUP_GE_MASK,
+  SYSTEM_VALUE_SUBGROUP_GT_MASK,
+  SYSTEM_VALUE_SUBGROUP_LE_MASK,
+  SYSTEM_VALUE_SUBGROUP_LT_MASK,
+  /*@}*/
+
+  /**
+   * Builtin variables added by VK_KHR_subgroups
+   */
+  /*@{*/
+  SYSTEM_VALUE_NUM_SUBGROUPS,
+  SYSTEM_VALUE_SUBGROUP_ID,
+  /*@}*/
+
+  /*@}*/
+
+  /**
+   * \name Vertex shader system values
+   */
+  /*@{*/
+  /**
+   * OpenGL-style vertex ID.
+   *
+   * Section 2.11.7 (Shader Execution), subsection Shader Inputs, of the
+   * OpenGL 3.3 core profile spec says:
+   *
+   *   "gl_VertexID holds the integer index i implicitly passed by
+   *    DrawArrays or one of the other drawing commands defined in section
+   *    2.8.3."
+   *
+   * Section 2.8.3 (Drawing Commands) of the same spec says:
+   *
+   *   "The commands....are equivalent to the commands with the same base
+   *    name (without the BaseVertex suffix), except that the ith element
+   *    transferred by the corresponding draw call will be taken from
+   *    element indices[i] + basevertex of each enabled array."
+   *
+   * Additionally, the overview in the GL_ARB_shader_draw_parameters spec
+   * says:
+   *
+   *   "In unextended GL, vertex shaders have inputs named gl_VertexID and
+   *    gl_InstanceID, which contain, respectively the index of the vertex
+   *    and instance.
The value of gl_VertexID is the implicitly passed + * index of the vertex being processed, which includes the value of + * baseVertex, for those commands that accept it." + * + * gl_VertexID gets basevertex added in. This differs from DirectX where + * SV_VertexID does \b not get basevertex added in. + * + * \note + * If all system values are available, \c SYSTEM_VALUE_VERTEX_ID will be + * equal to \c SYSTEM_VALUE_VERTEX_ID_ZERO_BASE plus + * \c SYSTEM_VALUE_BASE_VERTEX. + * + * \sa SYSTEM_VALUE_VERTEX_ID_ZERO_BASE, SYSTEM_VALUE_BASE_VERTEX + */ + SYSTEM_VALUE_VERTEX_ID, + + /** + * Instanced ID as supplied to gl_InstanceID + * + * Values assigned to gl_InstanceID always begin with zero, regardless of + * the value of baseinstance. + * + * Section 11.1.3.9 (Shader Inputs) of the OpenGL 4.4 core profile spec + * says: + * + * "gl_InstanceID holds the integer instance number of the current + * primitive in an instanced draw call (see section 10.5)." + * + * Through a big chain of pseudocode, section 10.5 describes that + * baseinstance is not counted by gl_InstanceID. In that section, notice + * + * "If an enabled vertex attribute array is instanced (it has a + * non-zero divisor as specified by VertexAttribDivisor), the element + * index that is transferred to the GL, for all vertices, is given by + * + * floor(instance/divisor) + baseinstance + * + * If an array corresponding to an attribute required by a vertex + * shader is not enabled, then the corresponding element is taken from + * the current attribute state (see section 10.2)." + * + * Note that baseinstance is \b not included in the value of instance. + */ + SYSTEM_VALUE_INSTANCE_ID, + + /** + * Vulkan InstanceIndex. + * + * InstanceIndex = gl_InstanceID + gl_BaseInstance + */ + SYSTEM_VALUE_INSTANCE_INDEX, + + /** + * DirectX-style vertex ID. + * + * Unlike \c SYSTEM_VALUE_VERTEX_ID, this system value does \b not include + * the value of basevertex. + * + * \sa SYSTEM_VALUE_VERTEX_ID, SYSTEM_VALUE_BASE_VERTEX + */ + SYSTEM_VALUE_VERTEX_ID_ZERO_BASE, + + /** + * Value of \c basevertex passed to \c glDrawElementsBaseVertex and similar + * functions. + * + * \sa SYSTEM_VALUE_VERTEX_ID, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE + */ + SYSTEM_VALUE_BASE_VERTEX, + + /** + * Depending on the type of the draw call (indexed or non-indexed), + * is the value of \c basevertex passed to \c glDrawElementsBaseVertex and + * similar, or is the value of \c first passed to \c glDrawArrays and + * similar. + * + * \note + * It can be used to calculate the \c SYSTEM_VALUE_VERTEX_ID as + * \c SYSTEM_VALUE_VERTEX_ID_ZERO_BASE plus \c SYSTEM_VALUE_FIRST_VERTEX. + * + * \sa SYSTEM_VALUE_VERTEX_ID_ZERO_BASE, SYSTEM_VALUE_VERTEX_ID + */ + SYSTEM_VALUE_FIRST_VERTEX, + + /** + * If the Draw command used to start the rendering was an indexed draw + * or not (~0/0). Useful to calculate \c SYSTEM_VALUE_BASE_VERTEX as + * \c SYSTEM_VALUE_IS_INDEXED_DRAW & \c SYSTEM_VALUE_FIRST_VERTEX. + */ + SYSTEM_VALUE_IS_INDEXED_DRAW, + + /** + * Value of \c baseinstance passed to instanced draw entry points + * + * \sa SYSTEM_VALUE_INSTANCE_ID + */ + SYSTEM_VALUE_BASE_INSTANCE, + + /** + * From _ARB_shader_draw_parameters: + * + * "Additionally, this extension adds a further built-in variable, + * gl_DrawID to the shading language. This variable contains the index + * of the draw currently being processed by a Multi* variant of a + * drawing command (such as MultiDrawElements or + * MultiDrawArraysIndirect)." + * + * If GL_ARB_multi_draw_indirect is not supported, this is always 0. 
+    */
+   SYSTEM_VALUE_DRAW_ID,
+   /*@}*/
+
+   /**
+    * \name Geometry shader system values
+    */
+   /*@{*/
+   SYSTEM_VALUE_INVOCATION_ID,  /**< (Also in Tessellation Control shader) */
+   /*@}*/
+
+   /**
+    * \name Fragment shader system values
+    */
+   /*@{*/
+   SYSTEM_VALUE_FRAG_COORD,
+   SYSTEM_VALUE_POINT_COORD,
+   SYSTEM_VALUE_FRONT_FACE,
+   SYSTEM_VALUE_SAMPLE_ID,
+   SYSTEM_VALUE_SAMPLE_POS,
+   SYSTEM_VALUE_SAMPLE_MASK_IN,
+   SYSTEM_VALUE_HELPER_INVOCATION,
+   SYSTEM_VALUE_COLOR0,
+   SYSTEM_VALUE_COLOR1,
+   /*@}*/
+
+   /**
+    * \name Tessellation Evaluation shader system values
+    */
+   /*@{*/
+   SYSTEM_VALUE_TESS_COORD,
+   SYSTEM_VALUE_VERTICES_IN,    /**< Tessellation vertices in input patch */
+   SYSTEM_VALUE_PRIMITIVE_ID,
+   SYSTEM_VALUE_TESS_LEVEL_OUTER, /**< TES input */
+   SYSTEM_VALUE_TESS_LEVEL_INNER, /**< TES input */
+   SYSTEM_VALUE_TESS_LEVEL_OUTER_DEFAULT, /**< TCS input for passthru TCS */
+   SYSTEM_VALUE_TESS_LEVEL_INNER_DEFAULT, /**< TCS input for passthru TCS */
+   /*@}*/
+
+   /**
+    * \name Compute shader system values
+    */
+   /*@{*/
+   SYSTEM_VALUE_LOCAL_INVOCATION_ID,
+   SYSTEM_VALUE_LOCAL_INVOCATION_INDEX,
+   SYSTEM_VALUE_GLOBAL_INVOCATION_ID,
+   SYSTEM_VALUE_GLOBAL_INVOCATION_INDEX,
+   SYSTEM_VALUE_WORK_GROUP_ID,
+   SYSTEM_VALUE_NUM_WORK_GROUPS,
+   SYSTEM_VALUE_LOCAL_GROUP_SIZE,
+   SYSTEM_VALUE_GLOBAL_GROUP_SIZE,
+   SYSTEM_VALUE_WORK_DIM,
+   SYSTEM_VALUE_USER_DATA_AMD,
+   /*@}*/
+
+   /** Required for VK_KHR_device_group */
+   SYSTEM_VALUE_DEVICE_INDEX,
+
+   /** Required for VK_KHX_multiview */
+   SYSTEM_VALUE_VIEW_INDEX,
+
+   /**
+    * Driver internal vertex-count, used (for example) for drivers to
+    * calculate stride for stream-out outputs. Not externally visible.
+    */
+   SYSTEM_VALUE_VERTEX_CNT,
+
+   /**
+    * Required for AMD_shader_explicit_vertex_parameter and also used for
+    * varying-fetch instructions.
+    *
+    * The _SIZE value is "primitive size", used to scale i/j in primitive
+    * space to pixel space.
+    */
+   SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL,
+   SYSTEM_VALUE_BARYCENTRIC_PERSP_SAMPLE,
+   SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTROID,
+   SYSTEM_VALUE_BARYCENTRIC_PERSP_SIZE,
+   SYSTEM_VALUE_BARYCENTRIC_LINEAR_PIXEL,
+   SYSTEM_VALUE_BARYCENTRIC_LINEAR_CENTROID,
+   SYSTEM_VALUE_BARYCENTRIC_LINEAR_SAMPLE,
+   SYSTEM_VALUE_BARYCENTRIC_PULL_MODEL,
+
+   /**
+    * IR3 specific geometry shader and tessellation control shader system
+    * values that pack invocation id, thread id and vertex id. Having these
+    * as nir level system values lets us do the unpacking in nir.
+    */
+   SYSTEM_VALUE_GS_HEADER_IR3,
+   SYSTEM_VALUE_TCS_HEADER_IR3,
+
+   SYSTEM_VALUE_MAX             /**< Number of values */
+} gl_system_value;
+
+const char *gl_system_value_name(gl_system_value sysval);
+
+/**
+ * The possible interpolation qualifiers that can be applied to a fragment
+ * shader input in GLSL.
+ *
+ * Note: INTERP_MODE_NONE must be 0 so that memsetting the
+ * ir_variable data structure to 0 causes the default behavior.
+ */
+enum glsl_interp_mode
+{
+   INTERP_MODE_NONE = 0,
+   INTERP_MODE_SMOOTH,
+   INTERP_MODE_FLAT,
+   INTERP_MODE_NOPERSPECTIVE,
+   INTERP_MODE_EXPLICIT,
+   INTERP_MODE_COUNT /**< Number of interpolation qualifiers */
+};
+
+enum glsl_interface_packing {
+   GLSL_INTERFACE_PACKING_STD140,
+   GLSL_INTERFACE_PACKING_SHARED,
+   GLSL_INTERFACE_PACKING_PACKED,
+   GLSL_INTERFACE_PACKING_STD430
+};
+
+const char *glsl_interp_mode_name(enum glsl_interp_mode qual);
+
+/**
+ * Fragment program results
+ */
+typedef enum
+{
+   FRAG_RESULT_DEPTH = 0,
+   FRAG_RESULT_STENCIL = 1,
+   /* If a single color should be written to all render targets, this
+    * register is written.
No FRAG_RESULT_DATAn will be written. + */ + FRAG_RESULT_COLOR = 2, + FRAG_RESULT_SAMPLE_MASK = 3, + + /* FRAG_RESULT_DATAn are the per-render-target (GLSL gl_FragData[n] + * or ARB_fragment_program fragment.color[n]) color results. If + * any are written, FRAG_RESULT_COLOR will not be written. + * FRAG_RESULT_DATA1 and up are simply for the benefit of + * gl_frag_result_name() and not to be construed as an upper bound + */ + FRAG_RESULT_DATA0 = 4, + FRAG_RESULT_DATA1, + FRAG_RESULT_DATA2, + FRAG_RESULT_DATA3, + FRAG_RESULT_DATA4, + FRAG_RESULT_DATA5, + FRAG_RESULT_DATA6, + FRAG_RESULT_DATA7, +} gl_frag_result; + +const char *gl_frag_result_name(gl_frag_result result); + +#define FRAG_RESULT_MAX (FRAG_RESULT_DATA0 + MAX_DRAW_BUFFERS) + +/** + * \brief Layout qualifiers for gl_FragDepth. + * + * Extension AMD_conservative_depth allows gl_FragDepth to be redeclared with + * a layout qualifier. + * + * \see enum ir_depth_layout + */ +enum gl_frag_depth_layout +{ + FRAG_DEPTH_LAYOUT_NONE, /**< No layout is specified. */ + FRAG_DEPTH_LAYOUT_ANY, + FRAG_DEPTH_LAYOUT_GREATER, + FRAG_DEPTH_LAYOUT_LESS, + FRAG_DEPTH_LAYOUT_UNCHANGED +}; + +/** + * \brief Buffer access qualifiers + */ +enum gl_access_qualifier +{ + ACCESS_COHERENT = (1 << 0), + ACCESS_RESTRICT = (1 << 1), + ACCESS_VOLATILE = (1 << 2), + ACCESS_NON_READABLE = (1 << 3), + ACCESS_NON_WRITEABLE = (1 << 4), + + /** The access may use a non-uniform buffer or image index */ + ACCESS_NON_UNIFORM = (1 << 5), + + /* This has the same semantics as NIR_INTRINSIC_CAN_REORDER, only to be + * used with loads. In other words, it means that the load can be + * arbitrarily reordered, or combined with other loads to the same address. + * It is implied by ACCESS_NON_WRITEABLE together with ACCESS_RESTRICT, and + * a lack of ACCESS_COHERENT and ACCESS_VOLATILE. + */ + ACCESS_CAN_REORDER = (1 << 6), + + /** Use as little cache space as possible. */ + ACCESS_STREAM_CACHE_POLICY = (1 << 7), +}; + +/** + * \brief Blend support qualifiers + */ +enum gl_advanced_blend_mode +{ + BLEND_NONE = 0x0000, + + BLEND_MULTIPLY = 0x0001, + BLEND_SCREEN = 0x0002, + BLEND_OVERLAY = 0x0004, + BLEND_DARKEN = 0x0008, + BLEND_LIGHTEN = 0x0010, + BLEND_COLORDODGE = 0x0020, + BLEND_COLORBURN = 0x0040, + BLEND_HARDLIGHT = 0x0080, + BLEND_SOFTLIGHT = 0x0100, + BLEND_DIFFERENCE = 0x0200, + BLEND_EXCLUSION = 0x0400, + BLEND_HSL_HUE = 0x0800, + BLEND_HSL_SATURATION = 0x1000, + BLEND_HSL_COLOR = 0x2000, + BLEND_HSL_LUMINOSITY = 0x4000, + + BLEND_ALL = 0x7fff, +}; + +enum blend_func +{ + BLEND_FUNC_ADD, + BLEND_FUNC_SUBTRACT, + BLEND_FUNC_REVERSE_SUBTRACT, + BLEND_FUNC_MIN, + BLEND_FUNC_MAX, +}; + +enum blend_factor +{ + BLEND_FACTOR_ZERO, + BLEND_FACTOR_SRC_COLOR, + BLEND_FACTOR_DST_COLOR, + BLEND_FACTOR_SRC_ALPHA, + BLEND_FACTOR_DST_ALPHA, + BLEND_FACTOR_CONSTANT_COLOR, + BLEND_FACTOR_CONSTANT_ALPHA, + BLEND_FACTOR_SRC_ALPHA_SATURATE, +}; + +enum gl_tess_spacing +{ + TESS_SPACING_UNSPECIFIED, + TESS_SPACING_EQUAL, + TESS_SPACING_FRACTIONAL_ODD, + TESS_SPACING_FRACTIONAL_EVEN, +}; + +/** + * A compare function enum for use in compiler lowering passes. This is in + * the same order as GL's compare functions (shifted down by GL_NEVER), and is + * exactly the same as gallium's PIPE_FUNC_*. + */ +enum compare_func +{ + COMPARE_FUNC_NEVER, + COMPARE_FUNC_LESS, + COMPARE_FUNC_EQUAL, + COMPARE_FUNC_LEQUAL, + COMPARE_FUNC_GREATER, + COMPARE_FUNC_NOTEQUAL, + COMPARE_FUNC_GEQUAL, + COMPARE_FUNC_ALWAYS, +}; + +/** + * Arrangements for grouping invocations from NV_compute_shader_derivatives. 
+ * + * The extension provides new layout qualifiers that support two different + * arrangements of compute shader invocations for the purpose of derivative + * computation. When specifying + * + * layout(derivative_group_quadsNV) in; + * + * compute shader invocations are grouped into 2x2x1 arrays whose four local + * invocation ID values follow the pattern: + * + * +-----------------+------------------+ + * | (2x+0, 2y+0, z) | (2x+1, 2y+0, z) | + * +-----------------+------------------+ + * | (2x+0, 2y+1, z) | (2x+1, 2y+1, z) | + * +-----------------+------------------+ + * + * where Y increases from bottom to top. When specifying + * + * layout(derivative_group_linearNV) in; + * + * compute shader invocations are grouped into 2x2x1 arrays whose four local + * invocation index values follow the pattern: + * + * +------+------+ + * | 4n+0 | 4n+1 | + * +------+------+ + * | 4n+2 | 4n+3 | + * +------+------+ + * + * If neither layout qualifier is specified, derivatives in compute shaders + * return zero, which is consistent with the handling of built-in texture + * functions like texture() in GLSL 4.50 compute shaders. + */ +enum gl_derivative_group { + DERIVATIVE_GROUP_NONE = 0, + DERIVATIVE_GROUP_QUADS, + DERIVATIVE_GROUP_LINEAR, +}; + +enum float_controls +{ + FLOAT_CONTROLS_DEFAULT_FLOAT_CONTROL_MODE = 0x0000, + FLOAT_CONTROLS_DENORM_PRESERVE_FP16 = 0x0001, + FLOAT_CONTROLS_DENORM_PRESERVE_FP32 = 0x0002, + FLOAT_CONTROLS_DENORM_PRESERVE_FP64 = 0x0004, + FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16 = 0x0008, + FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP32 = 0x0010, + FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP64 = 0x0020, + FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP16 = 0x0040, + FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP32 = 0x0080, + FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP64 = 0x0100, + FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16 = 0x0200, + FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32 = 0x0400, + FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64 = 0x0800, + FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16 = 0x1000, + FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32 = 0x2000, + FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64 = 0x4000, +}; + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* SHADER_ENUMS_H */ diff --git a/selfdrive/modeld/thneed/debug/decompiler/util/bitset.h b/selfdrive/modeld/thneed/debug/decompiler/util/bitset.h new file mode 100644 index 0000000000..264144c39b --- /dev/null +++ b/selfdrive/modeld/thneed/debug/decompiler/util/bitset.h @@ -0,0 +1,261 @@ +/* + * Mesa 3-D graphics library + * + * Copyright (C) 2006 Brian Paul All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +/** + * \file bitset.h + * \brief Bitset of arbitrary size definitions. + * \author Michal Krol + */ + +#ifndef BITSET_H +#define BITSET_H + +//#include "util/bitscan.h" +//#include "util/macros.h" + +/**************************************************************************** + * generic bitset implementation + */ + +#define BITSET_WORD unsigned int +#define BITSET_WORDBITS (sizeof (BITSET_WORD) * 8) + +/* bitset declarations + */ +#define BITSET_WORDS(bits) (((bits) + BITSET_WORDBITS - 1) / BITSET_WORDBITS) +#define BITSET_DECLARE(name, bits) BITSET_WORD name[BITSET_WORDS(bits)] + +/* bitset operations + */ +#define BITSET_COPY(x, y) memcpy( (x), (y), sizeof (x) ) +#define BITSET_EQUAL(x, y) (memcmp( (x), (y), sizeof (x) ) == 0) +#define BITSET_ZERO(x) memset( (x), 0, sizeof (x) ) +#define BITSET_ONES(x) memset( (x), 0xff, sizeof (x) ) + +#define BITSET_BITWORD(b) ((b) / BITSET_WORDBITS) +#define BITSET_BIT(b) (1u << ((b) % BITSET_WORDBITS)) + +/* single bit operations + */ +#define BITSET_TEST(x, b) (((x)[BITSET_BITWORD(b)] & BITSET_BIT(b)) != 0) +#define BITSET_SET(x, b) ((x)[BITSET_BITWORD(b)] |= BITSET_BIT(b)) +#define BITSET_CLEAR(x, b) ((x)[BITSET_BITWORD(b)] &= ~BITSET_BIT(b)) + +#define BITSET_MASK(b) (((b) % BITSET_WORDBITS == 0) ? ~0 : BITSET_BIT(b) - 1) +#define BITSET_RANGE(b, e) ((BITSET_MASK((e) + 1)) & ~(BITSET_BIT(b) - 1)) + +/* bit range operations + */ +#define BITSET_TEST_RANGE(x, b, e) \ + (BITSET_BITWORD(b) == BITSET_BITWORD(e) ? \ + (((x)[BITSET_BITWORD(b)] & BITSET_RANGE(b, e)) != 0) : \ + (assert (!"BITSET_TEST_RANGE: bit range crosses word boundary"), 0)) +#define BITSET_SET_RANGE(x, b, e) \ + (BITSET_BITWORD(b) == BITSET_BITWORD(e) ? \ + ((x)[BITSET_BITWORD(b)] |= BITSET_RANGE(b, e)) : \ + (assert (!"BITSET_SET_RANGE: bit range crosses word boundary"), 0)) +#define BITSET_CLEAR_RANGE(x, b, e) \ + (BITSET_BITWORD(b) == BITSET_BITWORD(e) ? \ + ((x)[BITSET_BITWORD(b)] &= ~BITSET_RANGE(b, e)) : \ + (assert (!"BITSET_CLEAR_RANGE: bit range crosses word boundary"), 0)) + +/* Get first bit set in a bitset. + */ +static inline int +__bitset_ffs(const BITSET_WORD *x, int n) +{ + int i; + + for (i = 0; i < n; i++) { + if (x[i]) + return ffs(x[i]) + BITSET_WORDBITS * i; + } + + return 0; +} + +#define BITSET_FFS(x) __bitset_ffs(x, ARRAY_SIZE(x)) + +static inline unsigned +__bitset_next_set(unsigned i, BITSET_WORD *tmp, + const BITSET_WORD *set, unsigned size) +{ + unsigned bit, word; + + /* NOTE: The initial conditions for this function are very specific. At + * the start of the loop, the tmp variable must be set to *set and the + * initial i value set to 0. This way, if there is a bit set in the first + * word, we ignore the i-value and just grab that bit (so 0 is ok, even + * though 0 may be returned). If the first word is 0, then the value of + * `word` will be 0 and we will go on to look at the second word. 
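+    *
+    * Minimal usage sketch via the BITSET_FOREACH_SET() wrapper defined
+    * below, which sets up exactly these initial conditions (illustrative;
+    * `mask` is a hypothetical 128-bit set):
+    *
+    *    BITSET_DECLARE(mask, 128);
+    *    BITSET_ZERO(mask);
+    *    BITSET_SET(mask, 70);
+    *    unsigned i;
+    *    BITSET_FOREACH_SET(i, mask, 128)
+    *       printf("bit %u\n", i);   // prints "bit 70" once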
+ */ + word = BITSET_BITWORD(i); + while (*tmp == 0) { + word++; + + if (word >= BITSET_WORDS(size)) + return size; + + *tmp = set[word]; + } + + /* Find the next set bit in the non-zero word */ + bit = ffs(*tmp) - 1; + + /* Unset the bit */ + *tmp &= ~(1ull << bit); + + return word * BITSET_WORDBITS + bit; +} + +/** + * Iterates over each set bit in a set + * + * @param __i iteration variable, bit number + * @param __set the bitset to iterate (will not be modified) + * @param __size number of bits in the set to consider + */ +#define BITSET_FOREACH_SET(__i, __set, __size) \ + for (BITSET_WORD __tmp = *(__set), *__foo = &__tmp; __foo != NULL; __foo = NULL) \ + for (__i = 0; \ + (__i = __bitset_next_set(__i, &__tmp, __set, __size)) < __size;) + +#ifdef __cplusplus + +/** + * Simple C++ wrapper of a bitset type of static size, with value semantics + * and basic bitwise arithmetic operators. The operators defined below are + * expected to have the same semantics as the same operator applied to other + * fundamental integer types. T is the name of the struct to instantiate + * it as, and N is the number of bits in the bitset. + */ +#define DECLARE_BITSET_T(T, N) struct T { \ + EXPLICIT_CONVERSION \ + operator bool() const \ + { \ + for (unsigned i = 0; i < BITSET_WORDS(N); i++) \ + if (words[i]) \ + return true; \ + return false; \ + } \ + \ + T & \ + operator=(int x) \ + { \ + const T c = {{ (BITSET_WORD)x }}; \ + return *this = c; \ + } \ + \ + friend bool \ + operator==(const T &b, const T &c) \ + { \ + return BITSET_EQUAL(b.words, c.words); \ + } \ + \ + friend bool \ + operator!=(const T &b, const T &c) \ + { \ + return !(b == c); \ + } \ + \ + friend bool \ + operator==(const T &b, int x) \ + { \ + const T c = {{ (BITSET_WORD)x }}; \ + return b == c; \ + } \ + \ + friend bool \ + operator!=(const T &b, int x) \ + { \ + return !(b == x); \ + } \ + \ + friend T \ + operator~(const T &b) \ + { \ + T c; \ + for (unsigned i = 0; i < BITSET_WORDS(N); i++) \ + c.words[i] = ~b.words[i]; \ + return c; \ + } \ + \ + T & \ + operator|=(const T &b) \ + { \ + for (unsigned i = 0; i < BITSET_WORDS(N); i++) \ + words[i] |= b.words[i]; \ + return *this; \ + } \ + \ + friend T \ + operator|(const T &b, const T &c) \ + { \ + T d = b; \ + d |= c; \ + return d; \ + } \ + \ + T & \ + operator&=(const T &b) \ + { \ + for (unsigned i = 0; i < BITSET_WORDS(N); i++) \ + words[i] &= b.words[i]; \ + return *this; \ + } \ + \ + friend T \ + operator&(const T &b, const T &c) \ + { \ + T d = b; \ + d &= c; \ + return d; \ + } \ + \ + bool \ + test(unsigned i) const \ + { \ + return BITSET_TEST(words, i); \ + } \ + \ + T & \ + set(unsigned i) \ + { \ + BITSET_SET(words, i); \ + return *this; \ + } \ + \ + T & \ + clear(unsigned i) \ + { \ + BITSET_CLEAR(words, i); \ + return *this; \ + } \ + \ + BITSET_WORD words[BITSET_WORDS(N)]; \ + } + +#endif + +#endif diff --git a/selfdrive/modeld/thneed/debug/decompiler/util/list.h b/selfdrive/modeld/thneed/debug/decompiler/util/list.h new file mode 100644 index 0000000000..7f36e8c39d --- /dev/null +++ b/selfdrive/modeld/thneed/debug/decompiler/util/list.h @@ -0,0 +1,262 @@ +/************************************************************************** + * + * Copyright 2006 VMware, Inc., Bismarck, ND. USA. + * All Rights Reserved. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * List macros heavily inspired by the Linux kernel
+ * list handling. No list looping yet.
+ *
+ * Is not threadsafe, so common operations need to
+ * be protected using an external mutex.
+ */
+
+#ifndef _UTIL_LIST_H_
+#define _UTIL_LIST_H_
+
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <assert.h>
+
+#ifdef DEBUG
+#  define list_assert(cond, msg)  assert(cond && msg)
+#else
+#  define list_assert(cond, msg)  (void)(0 && (cond))
+#endif
+
+struct list_head
+{
+   struct list_head *prev;
+   struct list_head *next;
+};
+
+static inline void list_inithead(struct list_head *item)
+{
+   item->prev = item;
+   item->next = item;
+}
+
+static inline void list_add(struct list_head *item, struct list_head *list)
+{
+   item->prev = list;
+   item->next = list->next;
+   list->next->prev = item;
+   list->next = item;
+}
+
+static inline void list_addtail(struct list_head *item, struct list_head *list)
+{
+   item->next = list;
+   item->prev = list->prev;
+   list->prev->next = item;
+   list->prev = item;
+}
+
+static inline bool list_is_empty(const struct list_head *list);
+
+static inline void list_replace(struct list_head *from, struct list_head *to)
+{
+   if (list_is_empty(from)) {
+      list_inithead(to);
+   } else {
+      to->prev = from->prev;
+      to->next = from->next;
+      from->next->prev = to;
+      from->prev->next = to;
+   }
+}
+
+static inline void list_del(struct list_head *item)
+{
+   item->prev->next = item->next;
+   item->next->prev = item->prev;
+   item->prev = item->next = NULL;
+}
+
+static inline void list_delinit(struct list_head *item)
+{
+   item->prev->next = item->next;
+   item->next->prev = item->prev;
+   item->next = item;
+   item->prev = item;
+}
+
+static inline bool list_is_empty(const struct list_head *list)
+{
+   return list->next == list;
+}
+
+/**
+ * Returns whether the list has exactly one element.
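+ *
+ * Illustrative example: after list_inithead(&head) this returns false
+ * (empty list); after one list_addtail(&item, &head) it returns true;
+ * after adding a second item it returns false again.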
+ */ +static inline bool list_is_singular(const struct list_head *list) +{ + return list->next != NULL && list->next != list && list->next->next == list; +} + +static inline unsigned list_length(const struct list_head *list) +{ + struct list_head *node; + unsigned length = 0; + for (node = list->next; node != list; node = node->next) + length++; + return length; +} + +static inline void list_splice(struct list_head *src, struct list_head *dst) +{ + if (list_is_empty(src)) + return; + + src->next->prev = dst; + src->prev->next = dst->next; + dst->next->prev = src->prev; + dst->next = src->next; +} + +static inline void list_splicetail(struct list_head *src, struct list_head *dst) +{ + if (list_is_empty(src)) + return; + + src->prev->next = dst; + src->next->prev = dst->prev; + dst->prev->next = src->next; + dst->prev = src->prev; +} + +static inline void list_validate(const struct list_head *list) +{ + struct list_head *node; + assert(list->next->prev == list && list->prev->next == list); + for (node = list->next; node != list; node = node->next) + assert(node->next->prev == node && node->prev->next == node); +} + +#define LIST_ENTRY(__type, __item, __field) \ + ((__type *)(((char *)(__item)) - offsetof(__type, __field))) + +/** + * Cast from a pointer to a member of a struct back to the containing struct. + * + * 'sample' MUST be initialized, or else the result is undefined! + */ +#ifndef container_of +#define container_of(ptr, sample, member) \ + (void *)((char *)(ptr) \ + - ((char *)&(sample)->member - (char *)(sample))) +#endif + +#define list_first_entry(ptr, type, member) \ + LIST_ENTRY(type, (ptr)->next, member) + +#define list_last_entry(ptr, type, member) \ + LIST_ENTRY(type, (ptr)->prev, member) + + +#define LIST_FOR_EACH_ENTRY(pos, head, member) \ + for (pos = NULL, pos = container_of((head)->next, pos, member); \ + &pos->member != (head); \ + pos = container_of(pos->member.next, pos, member)) + +#define LIST_FOR_EACH_ENTRY_SAFE(pos, storage, head, member) \ + for (pos = NULL, pos = container_of((head)->next, pos, member), \ + storage = container_of(pos->member.next, pos, member); \ + &pos->member != (head); \ + pos = storage, storage = container_of(storage->member.next, storage, member)) + +#define LIST_FOR_EACH_ENTRY_SAFE_REV(pos, storage, head, member) \ + for (pos = NULL, pos = container_of((head)->prev, pos, member), \ + storage = container_of(pos->member.prev, pos, member); \ + &pos->member != (head); \ + pos = storage, storage = container_of(storage->member.prev, storage, member)) + +#define LIST_FOR_EACH_ENTRY_FROM(pos, start, head, member) \ + for (pos = NULL, pos = container_of((start), pos, member); \ + &pos->member != (head); \ + pos = container_of(pos->member.next, pos, member)) + +#define LIST_FOR_EACH_ENTRY_FROM_REV(pos, start, head, member) \ + for (pos = NULL, pos = container_of((start), pos, member); \ + &pos->member != (head); \ + pos = container_of(pos->member.prev, pos, member)) + +#define list_for_each_entry(type, pos, head, member) \ + for (type *pos = LIST_ENTRY(type, (head)->next, member), \ + *__next = LIST_ENTRY(type, pos->member.next, member); \ + &pos->member != (head); \ + pos = LIST_ENTRY(type, pos->member.next, member), \ + list_assert(pos == __next, "use _safe iterator"), \ + __next = LIST_ENTRY(type, __next->member.next, member)) + +#define list_for_each_entry_safe(type, pos, head, member) \ + for (type *pos = LIST_ENTRY(type, (head)->next, member), \ + *__next = LIST_ENTRY(type, pos->member.next, member); \ + &pos->member != (head); \ + pos = 
__next, \ + __next = LIST_ENTRY(type, __next->member.next, member)) + +#define list_for_each_entry_rev(type, pos, head, member) \ + for (type *pos = LIST_ENTRY(type, (head)->prev, member), \ + *__prev = LIST_ENTRY(type, pos->member.prev, member); \ + &pos->member != (head); \ + pos = LIST_ENTRY(type, pos->member.prev, member), \ + list_assert(pos == __prev, "use _safe iterator"), \ + __prev = LIST_ENTRY(type, __prev->member.prev, member)) + +#define list_for_each_entry_safe_rev(type, pos, head, member) \ + for (type *pos = LIST_ENTRY(type, (head)->prev, member), \ + *__prev = LIST_ENTRY(type, pos->member.prev, member); \ + &pos->member != (head); \ + pos = __prev, \ + __prev = LIST_ENTRY(type, __prev->member.prev, member)) + +#define list_for_each_entry_from(type, pos, start, head, member) \ + for (type *pos = LIST_ENTRY(type, (start), member); \ + &pos->member != (head); \ + pos = LIST_ENTRY(type, pos->member.next, member)) + +#define list_for_each_entry_from_safe(type, pos, start, head, member) \ + for (type *pos = LIST_ENTRY(type, (start), member), \ + *__next = LIST_ENTRY(type, pos->member.next, member); \ + &pos->member != (head); \ + pos = __next, \ + __next = LIST_ENTRY(type, __next->member.next, member)) + +#define list_for_each_entry_from_rev(type, pos, start, head, member) \ + for (type *pos = LIST_ENTRY(type, (start), member); \ + &pos->member != (head); \ + pos = LIST_ENTRY(type, pos->member.prev, member)) + +#define list_pair_for_each_entry(type, pos1, pos2, head1, head2, member) \ + for (type *pos1 = LIST_ENTRY(type, (head1)->next, member), \ + *pos2 = LIST_ENTRY(type, (head2)->next, member); \ + &pos1->member != (head1) && &pos2->member != (head2); \ + pos1 = LIST_ENTRY(type, pos1->member.next, member), \ + pos2 = LIST_ENTRY(type, pos2->member.next, member)) + +#endif /*_UTIL_LIST_H_*/ diff --git a/selfdrive/modeld/thneed/debug/decompiler/util/macros.h b/selfdrive/modeld/thneed/debug/decompiler/util/macros.h new file mode 100644 index 0000000000..a36bdd411e --- /dev/null +++ b/selfdrive/modeld/thneed/debug/decompiler/util/macros.h @@ -0,0 +1,346 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */
+
+#ifndef UTIL_MACROS_H
+#define UTIL_MACROS_H
+
+#include <assert.h>
+
+/* Compute the size of an array */
+#ifndef ARRAY_SIZE
+#  define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+/* For compatibility with Clang's __has_builtin() */
+#ifndef __has_builtin
+#  define __has_builtin(x) 0
+#endif
+
+/**
+ * __builtin_expect macros
+ */
+#if !defined(HAVE___BUILTIN_EXPECT)
+#  define __builtin_expect(x, y) (x)
+#endif
+
+#ifndef likely
+#  ifdef HAVE___BUILTIN_EXPECT
+#    define likely(x)   __builtin_expect(!!(x), 1)
+#    define unlikely(x) __builtin_expect(!!(x), 0)
+#  else
+#    define likely(x)   (x)
+#    define unlikely(x) (x)
+#  endif
+#endif
+
+
+/**
+ * Static (compile-time) assertion.
+ * Basically, use COND to dimension an array. If COND is false/zero the
+ * array size will be -1 and we'll get a compilation error.
+ */
+#define STATIC_ASSERT(COND) \
+   do { \
+      (void) sizeof(char [1 - 2*!(COND)]); \
+   } while (0)
+
+
+/**
+ * Unreachable macro. Useful for suppressing "control reaches end of non-void
+ * function" warnings.
+ */
+#if defined(HAVE___BUILTIN_UNREACHABLE) || __has_builtin(__builtin_unreachable)
+#define unreachable(str) \
+do { \
+   assert(!str); \
+   __builtin_unreachable(); \
+} while (0)
+#elif defined (_MSC_VER)
+#define unreachable(str) \
+do { \
+   assert(!str); \
+   __assume(0); \
+} while (0)
+#else
+#define unreachable(str) assert(!str)
+#endif
+
+/**
+ * Assume macro. Useful for expressing our assumptions to the compiler,
+ * typically for purposes of silencing warnings.
+ */
+#if __has_builtin(__builtin_assume)
+#define assume(expr) \
+do { \
+   assert(expr); \
+   __builtin_assume(expr); \
+} while (0)
+#elif defined HAVE___BUILTIN_UNREACHABLE
+#define assume(expr) ((expr) ? ((void) 0) \
+                             : (assert(!"assumption failed"), \
+                                __builtin_unreachable()))
+#elif defined (_MSC_VER)
+#define assume(expr) __assume(expr)
+#else
+#define assume(expr) assert(expr)
+#endif
+
+/* Attribute const is used for functions that have no effects other than their
+ * return value, and only rely on the argument values to compute the return
+ * value. As a result, calls to it can be CSEed. Note that using memory
+ * pointed to by the arguments is not allowed for const functions.
+ */
+#ifdef HAVE_FUNC_ATTRIBUTE_CONST
+#define ATTRIBUTE_CONST __attribute__((__const__))
+#else
+#define ATTRIBUTE_CONST
+#endif
+
+#ifdef HAVE_FUNC_ATTRIBUTE_FLATTEN
+#define FLATTEN __attribute__((__flatten__))
+#else
+#define FLATTEN
+#endif
+
+#ifdef HAVE_FUNC_ATTRIBUTE_FORMAT
+#define PRINTFLIKE(f, a) __attribute__ ((format(__printf__, f, a)))
+#else
+#define PRINTFLIKE(f, a)
+#endif
+
+#ifdef HAVE_FUNC_ATTRIBUTE_MALLOC
+#define MALLOCLIKE __attribute__((__malloc__))
+#else
+#define MALLOCLIKE
+#endif
+
+/* Forced function inlining */
+/* Note: Clang also sets __GNUC__ (see other cases below) */
+#ifndef ALWAYS_INLINE
+#  if defined(__GNUC__)
+#    define ALWAYS_INLINE inline __attribute__((always_inline))
+#  elif defined(_MSC_VER)
+#    define ALWAYS_INLINE __forceinline
+#  else
+#    define ALWAYS_INLINE inline
+#  endif
+#endif
+
+/* Used to optionally mark structures with misaligned elements or size as
+ * packed, to trade off performance for space.
+ */
+#ifdef HAVE_FUNC_ATTRIBUTE_PACKED
+#define PACKED __attribute__((__packed__))
+#else
+#define PACKED
+#endif
+
+/* Attribute pure is used for functions that have no effects other than their
+ * return value. As a result, calls to it can be dead code eliminated.
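+ *
+ * Unlike ATTRIBUTE_CONST above, a pure function may still read memory
+ * (globals or data reachable from its arguments). E.g. a hypothetical
+ *
+ *    ATTRIBUTE_PURE unsigned list_length(const struct list_head *l);
+ *
+ * qualifies as pure because it only reads through its argument, but it
+ * could not be ATTRIBUTE_CONST.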
+ */ +#ifdef HAVE_FUNC_ATTRIBUTE_PURE +#define ATTRIBUTE_PURE __attribute__((__pure__)) +#else +#define ATTRIBUTE_PURE +#endif + +#ifdef HAVE_FUNC_ATTRIBUTE_RETURNS_NONNULL +#define ATTRIBUTE_RETURNS_NONNULL __attribute__((__returns_nonnull__)) +#else +#define ATTRIBUTE_RETURNS_NONNULL +#endif + +#ifndef NORETURN +# ifdef _MSC_VER +# define NORETURN __declspec(noreturn) +# elif defined HAVE_FUNC_ATTRIBUTE_NORETURN +# define NORETURN __attribute__((__noreturn__)) +# else +# define NORETURN +# endif +#endif + +#ifdef __cplusplus +/** + * Macro function that evaluates to true if T is a trivially + * destructible type -- that is, if its (non-virtual) destructor + * performs no action and all member variables and base classes are + * trivially destructible themselves. + */ +# if (defined(__clang__) && defined(__has_feature)) +# if __has_feature(has_trivial_destructor) +# define HAS_TRIVIAL_DESTRUCTOR(T) __has_trivial_destructor(T) +# endif +# elif defined(__GNUC__) +# if ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3))) +# define HAS_TRIVIAL_DESTRUCTOR(T) __has_trivial_destructor(T) +# endif +# elif defined(_MSC_VER) && !defined(__INTEL_COMPILER) +# define HAS_TRIVIAL_DESTRUCTOR(T) __has_trivial_destructor(T) +# endif +# ifndef HAS_TRIVIAL_DESTRUCTOR + /* It's always safe (if inefficient) to assume that a + * destructor is non-trivial. + */ +# define HAS_TRIVIAL_DESTRUCTOR(T) (false) +# endif +#endif + +/** + * PUBLIC/USED macros + * + * If we build the library with gcc's -fvisibility=hidden flag, we'll + * use the PUBLIC macro to mark functions that are to be exported. + * + * We also need to define a USED attribute, so the optimizer doesn't + * inline a static function that we later use in an alias. - ajax + */ +#ifndef PUBLIC +# if defined(__GNUC__) +# define PUBLIC __attribute__((visibility("default"))) +# define USED __attribute__((used)) +# elif defined(_MSC_VER) +# define PUBLIC __declspec(dllexport) +# define USED +# else +# define PUBLIC +# define USED +# endif +#endif + +/** + * UNUSED marks variables (or sometimes functions) that have to be defined, + * but are sometimes (or always) unused beyond that. A common case is for + * a function parameter to be used in some build configurations but not others. + * Another case is fallback vfuncs that don't do anything with their params. + * + * Note that this should not be used for identifiers used in `assert()`; + * see ASSERTED below. + */ +#ifdef HAVE_FUNC_ATTRIBUTE_UNUSED +#define UNUSED __attribute__((unused)) +#else +#define UNUSED +#endif + +/** + * Use ASSERTED to indicate that an identifier is unused outside of an `assert()`, + * so that assert-free builds don't get "unused variable" warnings. + */ +#ifdef NDEBUG +#define ASSERTED UNUSED +#else +#define ASSERTED +#endif + +#ifdef HAVE_FUNC_ATTRIBUTE_WARN_UNUSED_RESULT +#define MUST_CHECK __attribute__((warn_unused_result)) +#else +#define MUST_CHECK +#endif + +#if defined(__GNUC__) +#define ATTRIBUTE_NOINLINE __attribute__((noinline)) +#else +#define ATTRIBUTE_NOINLINE +#endif + + +/** + * Check that STRUCT::FIELD can hold MAXVAL. We use a lot of bitfields + * in Mesa/gallium. We have to be sure they're of sufficient size to + * hold the largest expected value. + * Note that with MSVC, enums are signed and enum bitfields need one extra + * high bit (always zero) to ensure the max value is handled correctly. + * This macro will detect that with MSVC, but not GCC. 
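+ *
+ * Worked example with a hypothetical struct: given
+ *
+ *    struct S { unsigned f:2; };
+ *
+ * ASSERT_BITFIELD_SIZE(struct S, f, 3) passes, while
+ * ASSERT_BITFIELD_SIZE(struct S, f, 4) trips the assert in debug builds,
+ * because 4 does not survive the round-trip through a 2-bit field.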
+ */ +#define ASSERT_BITFIELD_SIZE(STRUCT, FIELD, MAXVAL) \ + do { \ + ASSERTED STRUCT s; \ + s.FIELD = (MAXVAL); \ + assert((int) s.FIELD == (MAXVAL) && "Insufficient bitfield size!"); \ + } while (0) + + +/** Compute ceiling of integer quotient of A divided by B. */ +#define DIV_ROUND_UP( A, B ) ( ((A) + (B) - 1) / (B) ) + +/** Clamp X to [MIN,MAX]. Turn NaN into MIN, arbitrarily. */ +#define CLAMP( X, MIN, MAX ) ( (X)>(MIN) ? ((X)>(MAX) ? (MAX) : (X)) : (MIN) ) + +/** Minimum of two values: */ +#define MIN2( A, B ) ( (A)<(B) ? (A) : (B) ) + +/** Maximum of two values: */ +#define MAX2( A, B ) ( (A)>(B) ? (A) : (B) ) + +/** Minimum and maximum of three values: */ +#define MIN3( A, B, C ) ((A) < (B) ? MIN2(A, C) : MIN2(B, C)) +#define MAX3( A, B, C ) ((A) > (B) ? MAX2(A, C) : MAX2(B, C)) + +/** Align a value to a power of two */ +#define ALIGN_POT(x, pot_align) (((x) + (pot_align) - 1) & ~((pot_align) - 1)) + +/** + * Macro for declaring an explicit conversion operator. Defaults to an + * implicit conversion if C++11 is not supported. + */ +#if __cplusplus >= 201103L +#define EXPLICIT_CONVERSION explicit +#elif defined(__cplusplus) +#define EXPLICIT_CONVERSION +#endif + +/** Set a single bit */ +#define BITFIELD_BIT(b) (1u << (b)) +/** Set all bits up to excluding bit b */ +#define BITFIELD_MASK(b) \ + ((b) == 32 ? (~0u) : BITFIELD_BIT((b) % 32) - 1) +/** Set count bits starting from bit b */ +#define BITFIELD_RANGE(b, count) \ + (BITFIELD_MASK((b) + (count)) & ~BITFIELD_MASK(b)) + +/** Set a single bit */ +#define BITFIELD64_BIT(b) (1ull << (b)) +/** Set all bits up to excluding bit b */ +#define BITFIELD64_MASK(b) \ + ((b) == 64 ? (~0ull) : BITFIELD64_BIT(b) - 1) +/** Set count bits starting from bit b */ +#define BITFIELD64_RANGE(b, count) \ + (BITFIELD64_MASK((b) + (count)) & ~BITFIELD64_MASK(b)) + +/* TODO: In future we should try to move this to u_debug.h once header + * dependencies are reorganised to allow this. 
+ */
+enum pipe_debug_type
+{
+   PIPE_DEBUG_TYPE_OUT_OF_MEMORY = 1,
+   PIPE_DEBUG_TYPE_ERROR,
+   PIPE_DEBUG_TYPE_SHADER_INFO,
+   PIPE_DEBUG_TYPE_PERF_INFO,
+   PIPE_DEBUG_TYPE_INFO,
+   PIPE_DEBUG_TYPE_FALLBACK,
+   PIPE_DEBUG_TYPE_CONFORMANCE,
+};
+
+#endif /* UTIL_MACROS_H */
diff --git a/selfdrive/modeld/thneed/debug/disassembler.cc b/selfdrive/modeld/thneed/debug/disassembler.cc
new file mode 100644
index 0000000000..c1f7e6332c
--- /dev/null
+++ b/selfdrive/modeld/thneed/debug/disassembler.cc
@@ -0,0 +1,132 @@
+#include "debug/include/adreno_pm4types.h"
+#define REG_A5XX_TPL1_CS_TEX_CONST_LO 0x0000e760
+#define REG_A5XX_TPL1_CS_TEX_SAMP_LO 0x0000e75c
+#define REG_A5XX_SP_CS_CTRL_REG0 0x0000e5f0
+
+std::map<int, std::string> regs = {
+  {0x0000e760, "REG_A5XX_TPL1_CS_TEX_CONST_LO"},
+  {0x0000e75c, "REG_A5XX_TPL1_CS_TEX_SAMP_LO"},
+  {0x00000e06, "REG_A5XX_HLSQ_MODE_CNTL"},
+  {0x00000e91, "REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_LO"},
+  {0x00000ec2, "REG_A5XX_SP_MODE_CNTL"},
+  {0x0000e580, "REG_A5XX_SP_SP_CNTL"},
+  {0x0000e5f0, "REG_A5XX_SP_CS_CTRL_REG0"},
+  {0x0000e796, "REG_A5XX_HLSQ_CS_CNTL"},
+  {0x0000e784, "REG_A5XX_HLSQ_CONTROL_0_REG"},
+  {0x0000e7b0, "REG_A5XX_HLSQ_CS_NDRANGE_0"},
+  {0x0000e7b9, "REG_A5XX_HLSQ_CS_KERNEL_GROUP_X"},
+  {0x00000cdd, "REG_A5XX_VSC_RESOLVE_CNTL"},
+};
+
+std::map<int, std::string> ops = {
+  {33, "CP_REG_RMW"},
+  {62, "CP_REG_TO_MEM"},
+  {49, "CP_RUN_OPENCL"},
+  {16, "CP_NOP"},
+  {38, "CP_WAIT_FOR_IDLE"},
+  {110, "CP_COMPUTE_CHECKPOINT"},
+  {48, "CP_LOAD_STATE"},
+};
+
+void CachedCommand::disassemble() {
+  uint32_t *src = (uint32_t *)cmds[1].gpuaddr;
+  int len = cmds[1].size/4;
+  printf("disassemble %p %d\n", src, len);
+
+  int i = 0;
+  while (i < len) {
+    int pktsize = 0;
+    int pkttype = -1;
+
+    if (pkt_is_type0(src[i])) {
+      pkttype = 0;
+      pktsize = type0_pkt_size(src[i]);
+    } else if (pkt_is_type3(src[i])) {
+      pkttype = 3;
+      pktsize = type3_pkt_size(src[i]);
+    } else if (pkt_is_type4(src[i])) {
+      pkttype = 4;
+      pktsize = type4_pkt_size(src[i]);
+    } else if (pkt_is_type7(src[i])) {
+      pkttype = 7;
+      pktsize = type7_pkt_size(src[i]);
+    }
+    printf("%3d: type:%d size:%d ", i, pkttype, pktsize);
+
+    if (pkttype == 7) {
+      int op = cp_type7_opcode(src[i]);
+      if (ops.find(op) != ops.end()) {
+        printf("%-40s ", ops[op].c_str());
+      } else {
+        printf("op: %4d ", op);
+      }
+    }
+
+    if (pkttype == 4) {
+      int reg = cp_type4_base_index_one_reg_wr(src[i]);
+      if (regs.find(reg) != regs.end()) {
+        printf("%-40s ", regs[reg].c_str());
+      } else {
+        printf("reg: %4x ", reg);
+      }
+    }
+
+    for (int j = 0; j < pktsize+1; j++) {
+      printf("%8.8X ", src[i+j]);
+    }
+    printf("\n");
+
+    uint64_t addr;
+    if (pkttype == 7) {
+      switch (cp_type7_opcode(src[i])) {
+        case CP_LOAD_STATE:
+          int dst_off = src[i+1] & 0x1FFF;
+          int state_src = (src[i+1] >> 16) & 3;
+          int state_block = (src[i+1] >> 18) & 7;
+          int state_type = src[i+2] & 3;
+          int num_unit = (src[i+1] & 0xffc00000) >> 22;
+          printf(" dst_off: %x state_src: %d state_block: %d state_type: %d num_unit: %d\n",
+            dst_off, state_src, state_block, state_type, num_unit);
+          addr = (uint64_t)(src[i+2] & 0xfffffffc) | ((uint64_t)(src[i+3]) << 32);
+          if (state_block == 5 && state_type == 0) {
+            if (!(addr&0xFFF)) {
+              int len = 0x1000;
+              if (num_unit >= 32) len += 0x1000;
+              //hexdump((uint32_t *)addr, len);
+              char fn[0x100];
+              snprintf(fn, sizeof(fn), "/tmp/0x%lx.shader", addr);
+              printf("dumping %s\n", fn);
+              FILE *f = fopen(fn, "wb");
+              // groups of 16 instructions
+              fwrite((void*)addr, 1, len, f);
+              fclose(f);
+            }
+          }
+          break;
+      }
+    }
+
+    /*if (pkttype == 4) {
+      switch
(cp_type4_base_index_one_reg_wr(src[i])) { + case REG_A5XX_SP_CS_CTRL_REG0: + addr = (uint64_t)(src[i+4] & 0xfffffffc) | ((uint64_t)(src[i+5]) << 32); + hexdump((uint32_t *)addr, 0x1000); + break; + } + }*/ + + /*if (pkttype == 4 && cp_type4_base_index_one_reg_wr(src[i]) == REG_A5XX_TPL1_CS_TEX_CONST_LO) { + uint64_t addr = (uint64_t)(src[i+1] & 0xffffffff) | ((uint64_t)(src[i+2]) << 32); + hexdump((uint32_t *)addr, 0x40); + } + + if (pkttype == 4 && cp_type4_base_index_one_reg_wr(src[i]) == REG_A5XX_TPL1_CS_TEX_SAMP_LO) { + uint64_t addr = (uint64_t)(src[i+1] & 0xffffffff) | ((uint64_t)(src[i+2]) << 32); + hexdump((uint32_t *)addr, 0x40); + }*/ + + if (pkttype == -1) break; + i += (1+pktsize); + } + assert(i == len); +} diff --git a/selfdrive/modeld/thneed/debug/microbenchmark/gemm.cl b/selfdrive/modeld/thneed/debug/microbenchmark/gemm.cl new file mode 100644 index 0000000000..6a55406aee --- /dev/null +++ b/selfdrive/modeld/thneed/debug/microbenchmark/gemm.cl @@ -0,0 +1,51 @@ +// https://github.com/moskewcz/boda/issues/13 + +#define USE_FP16 + +#ifdef USE_FP16 + #define up(x) x + #define down(x) x + #define xtype half8 + #define skip 128 +#else + #define up(x) convert_float8(x) + #define down(x) convert_half8(x) + #define xtype float8 + #define skip 128 +#endif + +#pragma OPENCL EXTENSION cl_khr_fp16 : enable +__kernel void gemm(const int M, const int N, const int K, + global const half8* a, global const half8* b, global half8* c ) +{ + xtype c_r[8] = {0,0,0,0,0,0,0,0}; + + int const a_off_thr = get_global_id(0); + int const b_off_thr = get_global_id(1); + + int a_off = a_off_thr; + int b_off = b_off_thr; + for( int k = 0; k < 1024; k += 1 ) { + xtype a_r = up(a[a_off]); + xtype b_r = up(b[b_off]); + + c_r[0] += a_r.s0*b_r; + c_r[1] += a_r.s1*b_r; + c_r[2] += a_r.s2*b_r; + c_r[3] += a_r.s3*b_r; + c_r[4] += a_r.s4*b_r; + c_r[5] += a_r.s5*b_r; + c_r[6] += a_r.s6*b_r; + c_r[7] += a_r.s7*b_r; + + a_off += skip; + b_off += skip; + } + + int c_off = a_off_thr*1024 + b_off_thr; + for (int i = 0; i < 8; i++) { + c[c_off] = down(c_r[i]); + c_off += skip; + } +} + diff --git a/selfdrive/modeld/thneed/debug/microbenchmark/gemm_image.cl b/selfdrive/modeld/thneed/debug/microbenchmark/gemm_image.cl new file mode 100644 index 0000000000..46b6bf6ef8 --- /dev/null +++ b/selfdrive/modeld/thneed/debug/microbenchmark/gemm_image.cl @@ -0,0 +1,75 @@ +// https://github.com/moskewcz/boda/issues/13 + +//#define USE_FP16 + +#ifdef USE_FP16 + #define xtype half4 + #define read_imagep read_imageh + #define write_imagep write_imageh +#else + #define xtype float4 + #define read_imagep read_imagef + #define write_imagep write_imagef +#endif + +#pragma OPENCL EXTENSION cl_khr_fp16 : enable +__kernel void gemm(const int M, const int N, const int K, + read_only image2d_t A, + read_only image2d_t B, + write_only image2d_t C) +{ + const sampler_t smp = CLK_NORMALIZED_COORDS_FALSE | + CLK_ADDRESS_CLAMP | + CLK_FILTER_NEAREST; + + xtype c_r[4] = {0,0,0,0}; + xtype a_r[4], b_r[4]; + + int const a_off_thr = get_global_id(0); + int const b_off_thr = get_global_id(1); + + int2 a_samp = {0, a_off_thr}; + int2 b_samp = {0, b_off_thr}; + + for (short k = 0; k < K/4; k++) { + for (short i = 0; i < 4; ++i) { + a_r[i] = read_imagep(A, smp, a_samp); + b_r[i] = read_imagep(B, smp, b_samp); + ++a_samp.x; + ++b_samp.x; + } + + for (short i = 0; i < 4; ++i) { + float4 ov = c_r[i]; + + ov.x += a_r[i].x * b_r[0].x; + ov.x += a_r[i].y * b_r[0].y; + ov.x += a_r[i].z * b_r[0].z; + ov.x += a_r[i].w * b_r[0].w; + + ov.y += a_r[i].x * b_r[1].x; + 
ov.y += a_r[i].y * b_r[1].y;
+      ov.y += a_r[i].z * b_r[1].z;
+      ov.y += a_r[i].w * b_r[1].w;
+
+      ov.z += a_r[i].x * b_r[2].x;
+      ov.z += a_r[i].y * b_r[2].y;
+      ov.z += a_r[i].z * b_r[2].z;
+      ov.z += a_r[i].w * b_r[2].w;
+
+      ov.w += a_r[i].x * b_r[3].x;
+      ov.w += a_r[i].y * b_r[3].y;
+      ov.w += a_r[i].z * b_r[3].z;
+      ov.w += a_r[i].w * b_r[3].w;
+
+      c_r[i] = ov;
+    }
+  }
+
+  int2 c_samp = {a_off_thr, b_off_thr*4};
+  for (short i = 0; i < 4; i++) {
+    write_imagep(C, c_samp, c_r[i]);
+    ++c_samp.y;
+  }
+}
+
diff --git a/selfdrive/modeld/thneed/debug/microbenchmark/go.c b/selfdrive/modeld/thneed/debug/microbenchmark/go.c
new file mode 100644
index 0000000000..5635085638
--- /dev/null
+++ b/selfdrive/modeld/thneed/debug/microbenchmark/go.c
@@ -0,0 +1,314 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <time.h>
+#include <CL/cl.h>
+
+/*
+block7b_project_conv (Conv2D) (None, 8, 16, 352) 743424 block7b_activation[0][0]
+8448*8*4 = 8*16*2112 = 270336 = input = 128*2112
+2112*88*4 = 743424 = weights = 2112*352
+1408*8*4 = 8*16*352 = 45056 = output = 128*352
+
+FLOPS = 128*2112*352 = 95158272 = 95 MFLOPS
+RAM = 128*2112 + 2112*352 + 128*352 = 1058816 = 1 M accesses
+
+# 22 groups
+128*2112 + 2112*16 + 128*16 = 306176
+306176*22 = 6735872 real accesses
+
+This is a 128x2112 by 2112x352 matrix multiply
+
+work_size = {88, 4, 8}
+Each kernel run computes 16 outputs
+
+0x7f7e8a6380 convolution_horizontal_reduced_reads_1x1 -- 88 4 8 -- 4 4 8
+  image2d_t input = 0x7f7f490b00 image 8448 x 8 rp 67840
+  short startPackedInputChannel = 0
+  short numPackedInputChannelsForGroup = 528
+  short totalNumPackedInputChannels = 528
+  short packedOuputChannelOffset = 0
+  short totalNumPackedOutputChannels = 88
+  image2d_t weights = 0x7f7f52fb80 image 2112 x 88 rp 16896
+  float* biases = 0x7f7f564d80 buffer 1408
+  short filterSizeX = 1
+  short filterSizeY = 1
+  image2d_t output = 0x7f7f490e80 image 1408 x 8 rp 11264
+  short paddingX = 0
+  short paddingY = 0
+  short strideX = 1
+  short strideY = 1
+  short neuron = 0
+  float a = 1.000000
+  float b = 1.000000
+  float min_clamp = 0.000000
+  float max_clamp = 0.000000
+  float* parameters = 0x0
+  float* batchNormBiases = 0x0
+  short numOutputColumns = 16
+*/
+
+#define GEMM
+#define IMAGE
+
+void dump_maps() {
+  FILE *f = fopen("/proc/self/maps", "rb");
+  char maps[0x100000];
+  int len = fread(maps, 1, sizeof(maps) - 1, f);
+  maps[len] = '\0';
+  maps[0x800] = '\0';
+  fclose(f);
+  printf("%s\n", maps);
+}
+
+static inline uint64_t nanos_since_boot() {
+  struct timespec t;
+  clock_gettime(CLOCK_BOOTTIME, &t);
+  return t.tv_sec * 1000000000ULL + t.tv_nsec;
+}
+
+int main(int argc, char *argv[]) {
+  cl_int err;
+
+  // cl init
+  cl_device_id device_id;
+  cl_context context;
+  cl_command_queue q;
+  {
+    cl_platform_id platform_id[2];
+    cl_uint num_devices;
+    cl_uint num_platforms;
+
+    err = clGetPlatformIDs(sizeof(platform_id)/sizeof(cl_platform_id), platform_id, &num_platforms);
+    assert(err == 0);
+
+    err = clGetDeviceIDs(platform_id[0], CL_DEVICE_TYPE_DEFAULT, 1, &device_id, &num_devices);
+    assert(err == 0);
+
+    context = clCreateContext(NULL, 1, &device_id, NULL, NULL, &err);
+    assert(err == 0);
+
+    q = clCreateCommandQueue(context, device_id, 0, &err);
+    assert(err == 0);
+  }
+  printf("cl ready\n");
+
+  char tmp[0x10000];
+  memset(tmp, 0, sizeof(tmp));
+  FILE *f = fopen(argv[1], "rb");
+  fread(tmp, 1, sizeof(tmp) - 1, f);
+  fclose(f);
+
+  const char *strings[1];
+  size_t lengths[1];
+  strings[0] = tmp;
+  lengths[0] = strlen(tmp);
+
+  cl_program prog = clCreateProgramWithSource(context, 1, strings, lengths, &err);
+ 
assert(err == 0); + printf("creating program\n"); + + err = clBuildProgram(prog, 1, &device_id, "-D AVANTE_IS_GPU_A530_64", NULL, NULL); + + if (err != 0) { + printf("got err %d\n", err); + size_t length; + char buffer[2048]; + clGetProgramBuildInfo(prog, device_id, CL_PROGRAM_BUILD_LOG, sizeof(buffer), buffer, &length); + buffer[length] = '\0'; + printf("%s\n", buffer); + } + assert(err == 0); + printf("built program\n"); + + +#ifdef GEMM + // 128x2112 by 2112x352 + int M,N,K; + + M = N = K = 1024; + //M = 128; K = 2112; N = 352; + + cl_kernel kern = clCreateKernel(prog, "gemm", &err); + assert(err == 0); + printf("creating kernel %p\n", kern); + + cl_mem A,B,C; + A = clCreateBuffer(context, CL_MEM_READ_WRITE, M*K*2, NULL, &err); + assert(err == 0); + B = clCreateBuffer(context, CL_MEM_READ_WRITE, K*N*2, NULL, &err); + assert(err == 0); + C = clCreateBuffer(context, CL_MEM_READ_WRITE, M*N*2, NULL, &err); + assert(err == 0); + printf("created buffers\n"); + +#ifdef IMAGE + cl_image_format fmt; + fmt.image_channel_order = CL_RGBA; + fmt.image_channel_data_type = CL_HALF_FLOAT; + + cl_image_desc desc; + desc.image_type = CL_MEM_OBJECT_IMAGE2D; + desc.image_depth = 0; desc.image_slice_pitch = 0; desc.num_mip_levels = 0; desc.num_samples = 0; + + desc.image_width = K; desc.image_height = M/4; + desc.buffer = A; + desc.image_row_pitch = desc.image_width*8; + A = clCreateImage(context, CL_MEM_READ_WRITE, &fmt, &desc, NULL, &err); + assert(err == 0); + + desc.image_width = K; desc.image_height = N/4; + desc.buffer = B; desc.image_row_pitch = desc.image_width*8; + B = clCreateImage(context, CL_MEM_READ_WRITE, &fmt, &desc, NULL, &err); + assert(err == 0); + + desc.image_width = M/4; desc.image_height = N; + desc.buffer = C; desc.image_row_pitch = desc.image_width*8; + C = clCreateImage(context, CL_MEM_READ_WRITE, &fmt, &desc, NULL, &err); + assert(err == 0); + printf("created images\n"); +#endif + + clSetKernelArg(kern, 0, sizeof(int), &M); + clSetKernelArg(kern, 1, sizeof(int), &N); + clSetKernelArg(kern, 2, sizeof(int), &K); + + clSetKernelArg(kern, 3, sizeof(cl_mem), &A); + clSetKernelArg(kern, 4, sizeof(cl_mem), &B); + clSetKernelArg(kern, 5, sizeof(cl_mem), &C); + printf("set args\n"); + +#ifdef IMAGE + size_t global_work_size[3] = {M/4, N/4, 1}; + size_t local_work_size[3] = {4, 64, 1}; +#else + size_t global_work_size[3] = {128, 128, 1}; + size_t local_work_size[3] = {2, 128, 1}; +#endif + +#else + cl_kernel kern = clCreateKernel(prog, "convolution_horizontal_reduced_reads_1x1", &err); + assert(err == 0); + printf("creating kernel\n"); + + cl_mem input; + cl_mem weights; + cl_mem weights_buffer; + cl_mem biases; + cl_mem outputs; + + cl_image_format fmt; + fmt.image_channel_order = CL_RGBA; + fmt.image_channel_data_type = CL_HALF_FLOAT; + + cl_image_desc desc; + desc.image_type = CL_MEM_OBJECT_IMAGE2D; + desc.image_depth = 0; desc.image_slice_pitch = 0; desc.num_mip_levels = 0; desc.num_samples = 0; + desc.buffer = NULL; + + biases = clCreateBuffer(context, CL_MEM_READ_WRITE, 1408, NULL, &err); + assert(err == 0); + + desc.image_width = 8448; desc.image_height = 8; desc.image_row_pitch = 67840; + desc.buffer = clCreateBuffer(context, CL_MEM_READ_WRITE, desc.image_height * desc.image_row_pitch, NULL, &err); + assert(err == 0); + input = clCreateImage(context, CL_MEM_READ_WRITE, &fmt, &desc, NULL, &err); + assert(err == 0); + + desc.image_width = 2112; desc.image_height = 88; desc.image_row_pitch = 16896; + weights_buffer = desc.buffer = clCreateBuffer(context, CL_MEM_READ_WRITE, 
desc.image_height * desc.image_row_pitch, NULL, &err); + assert(err == 0); + weights = clCreateImage(context, CL_MEM_READ_WRITE, &fmt, &desc, NULL, &err); + assert(err == 0); + + desc.image_width = 1408; desc.image_height = 8; desc.image_row_pitch = 11264; + desc.buffer = clCreateBuffer(context, CL_MEM_READ_WRITE, desc.image_height * desc.image_row_pitch, NULL, &err); + assert(err == 0); + outputs = clCreateImage(context, CL_MEM_READ_WRITE, &fmt, &desc, NULL, &err); + assert(err == 0); + + void *n = NULL; + uint16_t v; + float fl; + + clSetKernelArg(kern, 0, sizeof(cl_mem), &input); + v = 0; clSetKernelArg(kern, 1, sizeof(v), &v); + v = 528; clSetKernelArg(kern, 2, sizeof(v), &v); + v = 528; clSetKernelArg(kern, 3, sizeof(v), &v); + v = 0; clSetKernelArg(kern, 4, sizeof(v), &v); + v = 88; clSetKernelArg(kern, 5, sizeof(v), &v); + clSetKernelArg(kern, 6, sizeof(cl_mem), &weights); + //clSetKernelArg(kern, 6, sizeof(cl_mem), &weights_buffer); + clSetKernelArg(kern, 7, sizeof(cl_mem), &biases); + v = 1; clSetKernelArg(kern, 8, sizeof(v), &v); + v = 1; clSetKernelArg(kern, 9, sizeof(v), &v); + clSetKernelArg(kern, 10, sizeof(cl_mem), &outputs); + v = 0; clSetKernelArg(kern, 11, sizeof(v), &v); + v = 0; clSetKernelArg(kern, 12, sizeof(v), &v); + v = 1; clSetKernelArg(kern, 13, sizeof(v), &v); + v = 1; clSetKernelArg(kern, 14, sizeof(v), &v); + v = 0; clSetKernelArg(kern, 15, sizeof(v), &v); + fl = 1.0; clSetKernelArg(kern, 16, sizeof(fl), &fl); + fl = 0.0; clSetKernelArg(kern, 17, sizeof(fl), &fl); + fl = 0.0; clSetKernelArg(kern, 18, sizeof(fl), &fl); + fl = 0.0; clSetKernelArg(kern, 19, sizeof(fl), &fl); + clSetKernelArg(kern, 20, sizeof(n), &n); + clSetKernelArg(kern, 21, sizeof(n), &n); + v = 16; clSetKernelArg(kern, 22, sizeof(v), &v); + + size_t global_work_size[3] = {88, 4, 8}; + size_t local_work_size[3] = {4, 4, 8}; +#endif + + printf("ready to enqueue\n"); + for (int i = 0; i < 20; i++) { + cl_event event; + err = clEnqueueNDRangeKernel(q, kern, 3, NULL, global_work_size, local_work_size, 0, NULL, &event); + assert(err == 0); + + uint64_t tb = nanos_since_boot(); + err = clWaitForEvents(1, &event); + assert(err == 0); + uint64_t te = nanos_since_boot(); + uint64_t us = (te-tb)/1000; + + float s = 1000000.0/us; + +#ifdef GEMM + float flops = M*N*K*s; + float rams = (M*N + N*K + M*K)*s; +#else + float flops = 95158272.0*s; + float rams = 1058816.0*s; + //float rams = 6735872.0*s; +#endif + + printf("%2d: wait %lu us -- %.2f GFLOPS -- %.2f GB/s\n", i, us, flops/1e9, rams*2/1e9); + } + + size_t binary_size = 0; + err = clGetProgramInfo(prog, CL_PROGRAM_BINARY_SIZES, sizeof(binary_size), &binary_size, NULL); + assert(err == 0); + assert(binary_size > 0); + + uint8_t *binary_buf = (uint8_t *)malloc(binary_size); + assert(binary_buf); + + uint8_t* bufs[1] = { binary_buf, }; + err = clGetProgramInfo(prog, CL_PROGRAM_BINARIES, sizeof(bufs), &bufs, NULL); + assert(err == 0); + + FILE *g = fopen("/tmp/bin.bin", "wb"); + fwrite(binary_buf, 1, binary_size, g); + fclose(g); + + /*dump_maps(); + for (uint64_t i = 0x7ffbd2000; i < 0x800000000; i += 0x1000) { + uint64_t cmd = *((uint64_t*)i); + printf("%llx: %llx\n", i, cmd); + }*/ + + + return 0; +} + diff --git a/selfdrive/modeld/thneed/debug/microbenchmark/run.sh b/selfdrive/modeld/thneed/debug/microbenchmark/run.sh new file mode 100755 index 0000000000..f77d27d817 --- /dev/null +++ b/selfdrive/modeld/thneed/debug/microbenchmark/run.sh @@ -0,0 +1,2 @@ +#!/usr/bin/env bash +gcc -I/data/openpilot/phonelibs/opencl/include -L/system/vendor/lib64 
-lOpenCL -lCB -lgsl go.c diff --git a/selfdrive/modeld/thneed/debug/test.cc b/selfdrive/modeld/thneed/debug/test.cc index b2ac600895..6f185b9f00 100644 --- a/selfdrive/modeld/thneed/debug/test.cc +++ b/selfdrive/modeld/thneed/debug/test.cc @@ -8,8 +8,9 @@ void hexdump(uint32_t *d, int len); int main(int argc, char* argv[]) { - float *output = (float*)calloc(0x10000, sizeof(float)); - float *golden = (float*)calloc(0x10000, sizeof(float)); + #define OUTPUT_SIZE 0x10000 + float *output = (float*)calloc(OUTPUT_SIZE, sizeof(float)); + float *golden = (float*)calloc(OUTPUT_SIZE, sizeof(float)); SNPEModel mdl(argv[1], output, 0, USE_GPU_RUNTIME); // cmd line test @@ -59,36 +60,39 @@ int main(int argc, char* argv[]) { // first run printf("************** execute 1 **************\n"); - memset(output, 0, sizeof(output)); + memset(output, 0, OUTPUT_SIZE * sizeof(float)); mdl.execute(input, 0); hexdump((uint32_t *)output, 0x100); - memcpy(golden, output, sizeof(output)); + memcpy(golden, output, OUTPUT_SIZE * sizeof(float)); // second run printf("************** execute 2 **************\n"); - memset(output, 0, sizeof(output)); + memset(output, 0, OUTPUT_SIZE * sizeof(float)); Thneed *t = new Thneed(); - t->record = 3; // debug print with record + t->record = 7; // debug print with record mdl.execute(input, 0); t->stop(); hexdump((uint32_t *)output, 0x100); - if (memcmp(golden, output, sizeof(output)) != 0) { printf("FAILURE\n"); return -1; } + if (memcmp(golden, output, OUTPUT_SIZE * sizeof(float)) != 0) { printf("FAILURE\n"); return -1; } // third run printf("************** execute 3 **************\n"); - memset(output, 0, sizeof(output)); + memset(output, 0, OUTPUT_SIZE * sizeof(float)); t->record = 2; // debug print w/o record float *inputs[4] = {state, traffic_convention, desire, input}; - t->execute(inputs, output); + t->execute(inputs, output, true); hexdump((uint32_t *)output, 0x100); - if (memcmp(golden, output, sizeof(output)) != 0) { printf("FAILURE\n"); return -1; } + if (memcmp(golden, output, OUTPUT_SIZE * sizeof(float)) != 0) { printf("FAILURE\n"); return -1; } printf("************** execute 4 **************\n"); - memset(output, 0, sizeof(output)); - //t->record = 2; // debug print w/o record - t->execute(inputs, output); - hexdump((uint32_t *)output, 0x100); - if (memcmp(golden, output, sizeof(output)) != 0) { printf("FAILURE\n"); return -1; } + while (1) { + memset(output, 0, OUTPUT_SIZE * sizeof(float)); + //t->record = 2; // debug print w/o record + t->execute(inputs, output); + hexdump((uint32_t *)output, 0x100); + if (memcmp(golden, output, OUTPUT_SIZE * sizeof(float)) != 0) { printf("FAILURE\n"); return -1; } + break; + } printf("************** execute done **************\n"); } diff --git a/selfdrive/modeld/thneed/thneed.cc b/selfdrive/modeld/thneed/thneed.cc index 826015999f..785463780e 100644 --- a/selfdrive/modeld/thneed/thneed.cc +++ b/selfdrive/modeld/thneed/thneed.cc @@ -13,7 +13,8 @@ std::map, std::string> g_args; static inline uint64_t nanos_since_boot() { struct timespec t; clock_gettime(CLOCK_BOOTTIME, &t); - return t.tv_sec * 1000000000ULL + t.tv_nsec; } + return t.tv_sec * 1000000000ULL + t.tv_nsec; +} void hexdump(uint32_t *d, int len) { assert((len%4) == 0); @@ -43,10 +44,12 @@ int ioctl(int filedes, unsigned long request, void *argp) { thneed->timestamp = cmd->timestamp; thneed->context_id = cmd->context_id; CachedCommand *ccmd = new CachedCommand(thneed, cmd); + //ccmd->disassemble(); thneed->cmds.push_back(ccmd); } if (thneed->record & 2) { - 
printf("IOCTL_KGSL_GPU_COMMAND: flags: 0x%lx context_id: %u timestamp: %u\n", + printf("IOCTL_KGSL_GPU_COMMAND(%2zu): flags: 0x%lx context_id: %u timestamp: %u\n", + thneed->cmds.size(), cmd->flags, cmd->context_id, cmd->timestamp); } @@ -179,7 +182,10 @@ void Thneed::stop() { //#define SAVE_LOG -void Thneed::execute(float **finputs, float *foutput) { +void Thneed::execute(float **finputs, float *foutput, bool slow) { + uint64_t tb, te; + if (record & 2) tb = nanos_since_boot(); + #ifdef SAVE_LOG char fn[0x100]; snprintf(fn, sizeof(fn), "/tmp/thneed_log_%d", timestamp); @@ -197,7 +203,7 @@ void Thneed::execute(float **finputs, float *foutput) { #endif if (record & 2) printf("copying %lu -- %p -> %p\n", sz, finputs[idx], inputs[idx]); - clEnqueueWriteBuffer(command_queue, inputs[idx], CL_TRUE, 0, sz, finputs[idx], 0, NULL, NULL); + //clEnqueueWriteBuffer(command_queue, inputs[idx], CL_TRUE, 0, sz, finputs[idx], 0, NULL, NULL); } // ****** set power constraint @@ -220,8 +226,9 @@ void Thneed::execute(float **finputs, float *foutput) { // ****** run commands int i = 0; for (auto it = cmds.begin(); it != cmds.end(); ++it) { + ++i; if (record & 2) printf("run %2d: ", i); - (*it)->exec((++i) == cmds.size()); + (*it)->exec((i == cmds.size()) || slow); } // ****** sync objects @@ -255,6 +262,11 @@ void Thneed::execute(float **finputs, float *foutput) { ret = ioctl(fd, IOCTL_KGSL_SETPROPERTY, &prop); assert(ret == 0); + + if (record & 2) { + te = nanos_since_boot(); + printf("model exec in %lu us\n", (te-tb)/1000); + } } cl_int (*my_clSetKernelArg)(cl_kernel kernel, cl_uint arg_index, size_t arg_size, const void *arg_value) = NULL; @@ -311,10 +323,19 @@ cl_int clEnqueueNDRangeKernel(cl_command_queue command_queue, } } } - + if (thneed != NULL && thneed->record & 2) { + printf("%p %56s -- ", kernel, name); + for (int i = 0; i < work_dim; i++) { + printf("%4zu ", global_work_size[i]); + } + printf(" -- "); + for (int i = 0; i < work_dim; i++) { + printf("%4zu ", local_work_size[i]); + } + printf("\n"); + } if (thneed != NULL && thneed->record & 4) { // extreme debug - printf("%s -- %p\n", name, kernel); for (int i = 0; i < num_args; i++) { char arg_type[0x100]; char arg_name[0x100]; @@ -337,6 +358,29 @@ cl_int clEnqueueNDRangeKernel(cl_command_queue command_queue, } else if (arg_size == 8) { cl_mem val = (cl_mem)(*((uintptr_t*)arg_value)); printf(" = %p", val); + if (val != NULL) { + if (strcmp("image2d_t", arg_type) == 0 || strcmp("image1d_t", arg_type) == 0) { + cl_image_format format; + size_t width, height, depth, array_size, row_pitch, slice_pitch; + clGetImageInfo(val, CL_IMAGE_FORMAT, sizeof(format), &format, NULL); + assert(format.image_channel_data_type == CL_HALF_FLOAT); + clGetImageInfo(val, CL_IMAGE_WIDTH, sizeof(width), &width, NULL); + clGetImageInfo(val, CL_IMAGE_HEIGHT, sizeof(height), &height, NULL); + clGetImageInfo(val, CL_IMAGE_DEPTH, sizeof(depth), &depth, NULL); + clGetImageInfo(val, CL_IMAGE_ARRAY_SIZE, sizeof(array_size), &array_size, NULL); + clGetImageInfo(val, CL_IMAGE_ROW_PITCH, sizeof(row_pitch), &row_pitch, NULL); + clGetImageInfo(val, CL_IMAGE_SLICE_PITCH, sizeof(slice_pitch), &slice_pitch, NULL); + assert(depth == 0); + assert(array_size == 0); + assert(slice_pitch == 0); + + printf(" image %zu x %zu rp %zu", width, height, row_pitch); + } else { + size_t sz; + clGetMemObjectInfo(val, CL_MEM_SIZE, sizeof(sz), &sz, NULL); + printf(" buffer %zu", sz); + } + } } printf("\n"); } @@ -345,6 +389,53 @@ cl_int clEnqueueNDRangeKernel(cl_command_queue command_queue, cl_int ret = 
+
+//#define SAVE_KERNELS
+
+#ifdef SAVE_KERNELS
+std::map<cl_program, std::string> program_source;
+#endif
+
+cl_program (*my_clCreateProgramWithSource)(cl_context context, cl_uint count, const char **strings, const size_t *lengths, cl_int *errcode_ret) = NULL;
+cl_program clCreateProgramWithSource(cl_context context, cl_uint count, const char **strings, const size_t *lengths, cl_int *errcode_ret) {
+  if (my_clCreateProgramWithSource == NULL) my_clCreateProgramWithSource = reinterpret_cast<decltype(my_clCreateProgramWithSource)>(dlsym(RTLD_NEXT, "REAL_clCreateProgramWithSource"));
+  assert(count == 1);
+  size_t my_lengths[1];
+  my_lengths[0] = lengths[0];
+
+#ifdef SAVE_KERNELS
+  // save the original source, keyed by its length
+  char fn[0x100];
+  snprintf(fn, sizeof(fn), "/tmp/program_%zu.cl", strlen(strings[0]));
+  FILE *f = fopen(fn, "wb");
+  fprintf(f, "%s", strings[0]);
+  fclose(f);
+
+  // if a hand-patched version exists, substitute it for the original
+  char tmp[0x10000];
+  memset(tmp, 0, sizeof(tmp));
+  snprintf(fn, sizeof(fn), "/tmp/patched_%zu.cl", strlen(strings[0]));
+  FILE *g = fopen(fn, "rb");
+  if (g != NULL) {
+    printf("LOADING PATCHED PROGRAM %s\n", fn);
+    fread(tmp, 1, sizeof(tmp), g);
+    fclose(g);
+    strings[0] = tmp;
+    my_lengths[0] = strlen(tmp);
+  }
+#endif
+
+  cl_program ret = my_clCreateProgramWithSource(context, count, strings, my_lengths, errcode_ret);
+
+#ifdef SAVE_KERNELS
+  program_source[ret] = strings[0];
+#endif
+
+  return ret;
 }
@@ -356,6 +447,8 @@ void *dlsym(void *handle, const char *symbol) {
     return (void*)clEnqueueNDRangeKernel;
   } else if (strcmp("clSetKernelArg", symbol) == 0) {
     return (void*)clSetKernelArg;
+  } else if (strcmp("clCreateProgramWithSource", symbol) == 0) {
+    return (void*)clCreateProgramWithSource;
   } else {
     return my_dlsym(handle, symbol);
   }
diff --git a/selfdrive/modeld/thneed/thneed.h b/selfdrive/modeld/thneed/thneed.h
index 9f35f5dcfb..89e8522570 100644
--- a/selfdrive/modeld/thneed/thneed.h
+++ b/selfdrive/modeld/thneed/thneed.h
@@ -20,6 +20,7 @@ class CachedCommand {
  public:
   CachedCommand(Thneed *lthneed, struct kgsl_gpu_command *cmd);
   void exec(bool wait);
+  void disassemble();
  private:
   struct kgsl_gpu_command cache;
   struct kgsl_command_object cmds[2];
@@ -31,7 +32,7 @@ class Thneed {
  public:
   Thneed();
   void stop();
-  void execute(float **finputs, float *foutput);
+  void execute(float **finputs, float *foutput, bool slow=false);
   std::vector<cl_mem> inputs;
   cl_mem output;
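Throughout the patch, Thneed::record is treated as a bitmask: test.cc sets it to 7, and thneed.cc tests record & 2 before per-command prints and record & 4 before per-argument dumps. Upstream uses bare integer literals; named constants inferred from the comments would read as follows (the names are ours, not part of the code):

  // hypothetical names for the record bits, inferred from the comments
  // "debug print with record", "debug print w/o record", "extreme debug"
  enum ThneedRecordFlags {
    THNEED_RECORD        = 1 << 0,  // capture kgsl ioctls into cmds
    THNEED_DEBUG_PRINT   = 1 << 1,  // record & 2: per-kernel / timing prints
    THNEED_EXTREME_DEBUG = 1 << 2,  // record & 4: dump every kernel argument
  };

  // e.g. t->record = 7 in test.cc is
  // THNEED_RECORD | THNEED_DEBUG_PRINT | THNEED_EXTREME_DEBUG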