get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
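
The Allow header in the exchange below lists GET, PUT, PATCH, HEAD and OPTIONS for this resource. As a rough illustration (not part of the API output), here is a minimal sketch of fetching this patch programmatically with Python's third-party requests package; the URL and field names are taken from the JSON shown below.

import requests

# Fetch patch 40808 as JSON (the same resource as the GET exchange below).
resp = requests.get("http://patchwork.dpdk.org/api/patches/40808/")
resp.raise_for_status()
patch = resp.json()

print(patch["name"])    # "[dpdk-dev,2/3] bpf: add extra validation for input BPF program"
print(patch["state"])   # "accepted"
print(patch["mbox"])    # URL of the raw patch mbox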

GET /api/patches/40808/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 40808,
    "url": "http://patchwork.dpdk.org/api/patches/40808/?format=api",
    "web_url": "http://patchwork.dpdk.org/project/dpdk/patch/1528447355-29411-3-git-send-email-konstantin.ananyev@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1528447355-29411-3-git-send-email-konstantin.ananyev@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1528447355-29411-3-git-send-email-konstantin.ananyev@intel.com",
    "date": "2018-06-08T08:42:34",
    "name": "[dpdk-dev,2/3] bpf: add extra validation for input BPF program",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "629abaee4900ba25ce4defc9f2e649cd83a48818",
    "submitter": {
        "id": 33,
        "url": "http://patchwork.dpdk.org/api/people/33/?format=api",
        "name": "Ananyev, Konstantin",
        "email": "konstantin.ananyev@intel.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patchwork.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patchwork.dpdk.org/project/dpdk/patch/1528447355-29411-3-git-send-email-konstantin.ananyev@intel.com/mbox/",
    "series": [
        {
            "id": 43,
            "url": "http://patchwork.dpdk.org/api/series/43/?format=api",
            "web_url": "http://patchwork.dpdk.org/project/dpdk/list/?series=43",
            "date": "2018-06-08T08:42:32",
            "name": "bpf: extend validation of input BPF programs",
            "version": 1,
            "mbox": "http://patchwork.dpdk.org/series/43/mbox/"
        }
    ],
    "comments": "http://patchwork.dpdk.org/api/patches/40808/comments/",
    "check": "success",
    "checks": "http://patchwork.dpdk.org/api/patches/40808/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 8457E1B1BA;\n\tFri,  8 Jun 2018 10:42:55 +0200 (CEST)",
            "from mga01.intel.com (mga01.intel.com [192.55.52.88])\n\tby dpdk.org (Postfix) with ESMTP id 1379D1B16A\n\tfor <dev@dpdk.org>; Fri,  8 Jun 2018 10:42:51 +0200 (CEST)",
            "from orsmga007.jf.intel.com ([10.7.209.58])\n\tby fmsmga101.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t08 Jun 2018 01:42:50 -0700",
            "from sivswdev02.ir.intel.com (HELO localhost.localdomain)\n\t([10.237.217.46])\n\tby orsmga007.jf.intel.com with ESMTP; 08 Jun 2018 01:42:49 -0700"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.49,490,1520924400\"; d=\"scan'208\";a=\"47424174\"",
        "From": "Konstantin Ananyev <konstantin.ananyev@intel.com>",
        "To": [
            "dev@dpdk.org",
            "dev@dpdk.org"
        ],
        "Cc": "Konstantin Ananyev <konstantin.ananyev@intel.com>",
        "Date": "Fri,  8 Jun 2018 09:42:34 +0100",
        "Message-Id": "<1528447355-29411-3-git-send-email-konstantin.ananyev@intel.com>",
        "X-Mailer": "git-send-email 1.7.0.7",
        "In-Reply-To": "<1528447355-29411-1-git-send-email-konstantin.ananyev@intel.com>",
        "References": "<1528447355-29411-1-git-send-email-konstantin.ananyev@intel.com>",
        "Subject": "[dpdk-dev] [PATCH 2/3] bpf: add extra validation for input BPF\n\tprogram",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Add checks for:\n - use/return uninitialized registers and/or stack data\n - possible memory access boundaries violation\n - invalid arguments for the function\n\nSigned-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>\n---\n lib/librte_bpf/bpf_validate.c | 1136 +++++++++++++++++++++++++++++++++++++++--\n 1 file changed, 1100 insertions(+), 36 deletions(-)",
    "diff": "diff --git a/lib/librte_bpf/bpf_validate.c b/lib/librte_bpf/bpf_validate.c\nindex b7081c853..83983efc4 100644\n--- a/lib/librte_bpf/bpf_validate.c\n+++ b/lib/librte_bpf/bpf_validate.c\n@@ -11,9 +11,28 @@\n \n #include <rte_common.h>\n #include <rte_eal.h>\n+#include <rte_byteorder.h>\n \n #include \"bpf_impl.h\"\n \n+struct bpf_reg_val {\n+\tstruct rte_bpf_arg v;\n+\tuint64_t mask;\n+\tstruct {\n+\t\tint64_t min;\n+\t\tint64_t max;\n+\t} s;\n+\tstruct {\n+\t\tuint64_t min;\n+\t\tuint64_t max;\n+\t} u;\n+};\n+\n+struct bpf_eval_state {\n+\tstruct bpf_reg_val rv[EBPF_REG_NUM];\n+\tstruct bpf_reg_val sv[MAX_BPF_STACK_SIZE / sizeof(uint64_t)];\n+};\n+\n /* possible instruction node colour */\n enum {\n \tWHITE,\n@@ -31,14 +50,6 @@ enum {\n \tMAX_EDGE_TYPE\n };\n \n-struct bpf_reg_state {\n-\tuint64_t val;\n-};\n-\n-struct bpf_eval_state {\n-\tstruct bpf_reg_state rs[EBPF_REG_NUM];\n-};\n-\n #define\tMAX_EDGES\t2\n \n struct inst_node {\n@@ -54,12 +65,13 @@ struct inst_node {\n struct bpf_verifier {\n \tconst struct rte_bpf_prm *prm;\n \tstruct inst_node *in;\n-\tint32_t stack_sz;\n+\tuint64_t stack_sz;\n \tuint32_t nb_nodes;\n \tuint32_t nb_jcc_nodes;\n \tuint32_t node_colour[MAX_NODE_COLOUR];\n \tuint32_t edge_type[MAX_EDGE_TYPE];\n \tstruct bpf_eval_state *evst;\n+\tstruct inst_node *evin;\n \tstruct {\n \t\tuint32_t num;\n \t\tuint32_t cur;\n@@ -101,40 +113,823 @@ check_alu_bele(const struct ebpf_insn *ins)\n }\n \n static const char *\n-eval_stack(struct bpf_verifier *bvf, const struct ebpf_insn *ins)\n+eval_exit(struct bpf_verifier *bvf, const struct ebpf_insn *ins)\n+{\n+\tRTE_SET_USED(ins);\n+\tif (bvf->evst->rv[EBPF_REG_0].v.type == RTE_BPF_ARG_UNDEF)\n+\t\treturn \"undefined return value\";\n+\treturn NULL;\n+}\n+\n+/* setup max possible with this mask bounds */\n+static void\n+eval_umax_bound(struct bpf_reg_val *rv, uint64_t mask)\n+{\n+\trv->u.max = mask;\n+\trv->u.min = 0;\n+}\n+\n+static void\n+eval_smax_bound(struct bpf_reg_val *rv, uint64_t mask)\n+{\n+\trv->s.max = mask >> 1;\n+\trv->s.min = rv->s.max ^ UINT64_MAX;\n+}\n+\n+static void\n+eval_max_bound(struct bpf_reg_val *rv, uint64_t mask)\n+{\n+\teval_umax_bound(rv, mask);\n+\teval_smax_bound(rv, mask);\n+}\n+\n+static void\n+eval_fill_max_bound(struct bpf_reg_val *rv, uint64_t mask)\n+{\n+\teval_max_bound(rv, mask);\n+\trv->v.type = RTE_BPF_ARG_RAW;\n+\trv->mask = mask;\n+}\n+\n+static void\n+eval_fill_imm64(struct bpf_reg_val *rv, uint64_t mask, uint64_t val)\n+{\n+\trv->mask = mask;\n+\trv->s.min = val;\n+\trv->s.max = val;\n+\trv->u.min = val;\n+\trv->u.max = val;\n+}\n+\n+static void\n+eval_fill_imm(struct bpf_reg_val *rv, uint64_t mask, int32_t imm)\n+{\n+\tuint64_t v;\n+\n+\tv = (uint64_t)imm & mask;\n+\n+\trv->v.type = RTE_BPF_ARG_RAW;\n+\teval_fill_imm64(rv, mask, v);\n+}\n+\n+static const char *\n+eval_ld_imm64(struct bpf_verifier *bvf, const struct ebpf_insn *ins)\n {\n-\tint32_t ofs;\n+\tuint32_t i;\n+\tuint64_t val;\n+\tstruct bpf_reg_val *rd;\n+\n+\tval = (uint32_t)ins[0].imm | (uint64_t)(uint32_t)ins[1].imm << 32;\n \n-\tofs = ins->off;\n+\trd = bvf->evst->rv + ins->dst_reg;\n+\trd->v.type = RTE_BPF_ARG_RAW;\n+\teval_fill_imm64(rd, UINT64_MAX, val);\n \n-\tif (ofs >= 0 || ofs < -MAX_BPF_STACK_SIZE)\n-\t\treturn \"stack boundary violation\";\n+\tfor (i = 0; i != bvf->prm->nb_xsym; i++) {\n+\n+\t\t/* load of external variable */\n+\t\tif (bvf->prm->xsym[i].type == RTE_BPF_XTYPE_VAR &&\n+\t\t\t\t(uintptr_t)bvf->prm->xsym[i].var.val == val) {\n+\t\t\trd->v = 
bvf->prm->xsym[i].var.desc;\n+\t\t\teval_fill_imm64(rd, UINT64_MAX, 0);\n+\t\t\tbreak;\n+\t\t}\n+\t}\n \n-\tofs = -ofs;\n-\tbvf->stack_sz = RTE_MAX(bvf->stack_sz, ofs);\n \treturn NULL;\n }\n \n+static void\n+eval_apply_mask(struct bpf_reg_val *rv, uint64_t mask)\n+{\n+\tstruct bpf_reg_val rt;\n+\n+\trt.u.min = rv->u.min & mask;\n+\trt.u.max = rv->u.max & mask;\n+\tif (rt.u.min != rv->u.min || rt.u.max != rv->u.max) {\n+\t\trv->u.max = RTE_MAX(rt.u.max, mask);\n+\t\trv->u.min = 0;\n+\t}\n+\n+\teval_smax_bound(&rt, mask);\n+\trv->s.max = RTE_MIN(rt.s.max, rv->s.max);\n+\trv->s.min = RTE_MAX(rt.s.min, rv->s.min);\n+\n+\trv->mask = mask;\n+}\n+\n+static void\n+eval_add(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, uint64_t msk)\n+{\n+\tstruct bpf_reg_val rv;\n+\n+\trv.u.min = (rd->u.min + rs->u.min) & msk;\n+\trv.u.max = (rd->u.min + rs->u.max) & msk;\n+\trv.s.min = (rd->s.min + rs->s.min) & msk;\n+\trv.s.max = (rd->s.max + rs->s.max) & msk;\n+\n+\t/*\n+\t * if at least one of the operands is not constant,\n+\t * then check for overflow\n+\t */\n+\tif ((rd->u.min != rd->u.max || rs->u.min != rs->u.max) &&\n+\t\t\t(rv.u.min < rd->u.min || rv.u.max < rd->u.max))\n+\t\teval_umax_bound(&rv, msk);\n+\n+\tif ((rd->s.min != rd->s.max || rs->s.min != rs->s.max) &&\n+\t\t\t(((rs->s.min < 0 && rv.s.min > rd->s.min) ||\n+\t\t\trv.s.min < rd->s.min) ||\n+\t\t\t((rs->s.max < 0 && rv.s.max > rd->s.max) ||\n+\t\t\t\trv.s.max < rd->s.max)))\n+\t\teval_smax_bound(&rv, msk);\n+\n+\trd->s = rv.s;\n+\trd->u = rv.u;\n+}\n+\n+static void\n+eval_sub(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, uint64_t msk)\n+{\n+\tstruct bpf_reg_val rv;\n+\n+\trv.u.min = (rd->u.min - rs->u.min) & msk;\n+\trv.u.max = (rd->u.min - rs->u.max) & msk;\n+\trv.s.min = (rd->s.min - rs->s.min) & msk;\n+\trv.s.max = (rd->s.max - rs->s.max) & msk;\n+\n+\t/*\n+\t * if at least one of the operands is not constant,\n+\t * then check for overflow\n+\t */\n+\tif ((rd->u.min != rd->u.max || rs->u.min != rs->u.max) &&\n+\t\t\t(rv.u.min > rd->u.min || rv.u.max > rd->u.max))\n+\t\teval_umax_bound(&rv, msk);\n+\n+\tif ((rd->s.min != rd->s.max || rs->s.min != rs->s.max) &&\n+\t\t\t(((rs->s.min < 0 && rv.s.min < rd->s.min) ||\n+\t\t\trv.s.min > rd->s.min) ||\n+\t\t\t((rs->s.max < 0 && rv.s.max < rd->s.max) ||\n+\t\t\trv.s.max > rd->s.max)))\n+\t\teval_smax_bound(&rv, msk);\n+\n+\trd->s = rv.s;\n+\trd->u = rv.u;\n+}\n+\n+static void\n+eval_lsh(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,\n+\tuint64_t msk)\n+{\n+\t/* check if shift value is less then max result bits */\n+\tif (rs->u.max >= opsz) {\n+\t\teval_max_bound(rd, msk);\n+\t\treturn;\n+\t}\n+\n+\t/* check for overflow */\n+\tif (rd->u.max > RTE_LEN2MASK(opsz - rs->u.max, uint64_t))\n+\t\teval_umax_bound(rd, msk);\n+\telse {\n+\t\trd->u.max <<= rs->u.max;\n+\t\trd->u.min <<= rs->u.min;\n+\t}\n+\n+\t/* check that dreg values are and would remain always positive */\n+\tif ((uint64_t)rd->s.min >> (opsz - 1) != 0 || rd->s.max >=\n+\t\t\tRTE_LEN2MASK(opsz - rs->u.max - 1, int64_t))\n+\t\teval_smax_bound(rd, msk);\n+\telse {\n+\t\trd->s.max <<= rs->u.max;\n+\t\trd->s.min <<= rs->u.min;\n+\t}\n+}\n+\n+static void\n+eval_rsh(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,\n+\tuint64_t msk)\n+{\n+\t/* check if shift value is less then max result bits */\n+\tif (rs->u.max >= opsz) {\n+\t\teval_max_bound(rd, msk);\n+\t\treturn;\n+\t}\n+\n+\trd->u.max >>= rs->u.min;\n+\trd->u.min >>= rs->u.max;\n+\n+\t/* check that dreg values are always positive 
*/\n+\tif ((uint64_t)rd->s.min >> (opsz - 1) != 0)\n+\t\teval_smax_bound(rd, msk);\n+\telse {\n+\t\trd->s.max >>= rs->u.min;\n+\t\trd->s.min >>= rs->u.max;\n+\t}\n+}\n+\n+static void\n+eval_arsh(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,\n+\tuint64_t msk)\n+{\n+\tuint32_t shv;\n+\n+\t/* check if shift value is less then max result bits */\n+\tif (rs->u.max >= opsz) {\n+\t\teval_max_bound(rd, msk);\n+\t\treturn;\n+\t}\n+\n+\trd->u.max = (int64_t)rd->u.max >> rs->u.min;\n+\trd->u.min = (int64_t)rd->u.min >> rs->u.max;\n+\n+\t/* if we have 32-bit values - extend them to 64-bit */\n+\tif (opsz == sizeof(uint32_t) * CHAR_BIT) {\n+\t\trd->s.min <<= opsz;\n+\t\trd->s.max <<= opsz;\n+\t\tshv = opsz;\n+\t} else\n+\t\tshv = 0;\n+\n+\tif (rd->s.min < 0)\n+\t\trd->s.min = (rd->s.min >> (rs->u.min + shv)) & msk;\n+\telse\n+\t\trd->s.min = (rd->s.min >> (rs->u.max + shv)) & msk;\n+\n+\tif (rd->s.max < 0)\n+\t\trd->s.max = (rd->s.max >> (rs->u.max + shv)) & msk;\n+\telse\n+\t\trd->s.max = (rd->s.max >> (rs->u.min + shv)) & msk;\n+}\n+\n+static uint64_t\n+eval_umax_bits(uint64_t v, size_t opsz)\n+{\n+\tif (v == 0)\n+\t\treturn 0;\n+\n+\tv = __builtin_clzll(v);\n+\treturn RTE_LEN2MASK(opsz - v, uint64_t);\n+}\n+\n+/* estimate max possible value for (v1 & v2) */\n+static uint64_t\n+eval_uand_max(uint64_t v1, uint64_t v2, size_t opsz)\n+{\n+\tv1 = eval_umax_bits(v1, opsz);\n+\tv2 = eval_umax_bits(v2, opsz);\n+\treturn (v1 & v2);\n+}\n+\n+/* estimate max possible value for (v1 | v2) */\n+static uint64_t\n+eval_uor_max(uint64_t v1, uint64_t v2, size_t opsz)\n+{\n+\tv1 = eval_umax_bits(v1, opsz);\n+\tv2 = eval_umax_bits(v2, opsz);\n+\treturn (v1 | v2);\n+}\n+\n+static void\n+eval_and(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,\n+\tuint64_t msk)\n+{\n+\t/* both operands are constants */\n+\tif (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {\n+\t\trd->u.min &= rs->u.min;\n+\t\trd->u.max &= rs->u.max;\n+\t} else {\n+\t\trd->u.max = eval_uand_max(rd->u.max, rs->u.max, opsz);\n+\t\trd->u.min &= rs->u.min;\n+\t}\n+\n+\t/* both operands are constants */\n+\tif (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {\n+\t\trd->s.min &= rs->s.min;\n+\t\trd->s.max &= rs->s.max;\n+\t/* at least one of operand is non-negative */\n+\t} else if (rd->s.min >= 0 || rs->s.min >= 0) {\n+\t\trd->s.max = eval_uand_max(rd->s.max & (msk >> 1),\n+\t\t\trs->s.max & (msk >> 1), opsz);\n+\t\trd->s.min &= rs->s.min;\n+\t} else\n+\t\teval_smax_bound(rd, msk);\n+}\n+\n+static void\n+eval_or(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,\n+\tuint64_t msk)\n+{\n+\t/* both operands are constants */\n+\tif (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {\n+\t\trd->u.min |= rs->u.min;\n+\t\trd->u.max |= rs->u.max;\n+\t} else {\n+\t\trd->u.max = eval_uor_max(rd->u.max, rs->u.max, opsz);\n+\t\trd->u.min |= rs->u.min;\n+\t}\n+\n+\t/* both operands are constants */\n+\tif (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {\n+\t\trd->s.min |= rs->s.min;\n+\t\trd->s.max |= rs->s.max;\n+\n+\t/* both operands are non-negative */\n+\t} else if (rd->s.min >= 0 || rs->s.min >= 0) {\n+\t\trd->s.max = eval_uor_max(rd->s.max, rs->s.max, opsz);\n+\t\trd->s.min |= rs->s.min;\n+\t} else\n+\t\teval_smax_bound(rd, msk);\n+}\n+\n+static void\n+eval_xor(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,\n+\tuint64_t msk)\n+{\n+\t/* both operands are constants */\n+\tif (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {\n+\t\trd->u.min ^= rs->u.min;\n+\t\trd->u.max ^= 
rs->u.max;\n+\t} else {\n+\t\trd->u.max = eval_uor_max(rd->u.max, rs->u.max, opsz);\n+\t\trd->u.min = 0;\n+\t}\n+\n+\t/* both operands are constants */\n+\tif (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {\n+\t\trd->s.min ^= rs->s.min;\n+\t\trd->s.max ^= rs->s.max;\n+\n+\t/* both operands are non-negative */\n+\t} else if (rd->s.min >= 0 || rs->s.min >= 0) {\n+\t\trd->s.max = eval_uor_max(rd->s.max, rs->s.max, opsz);\n+\t\trd->s.min = 0;\n+\t} else\n+\t\teval_smax_bound(rd, msk);\n+}\n+\n+static void\n+eval_mul(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,\n+\tuint64_t msk)\n+{\n+\t/* both operands are constants */\n+\tif (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {\n+\t\trd->u.min = (rd->u.min * rs->u.min) & msk;\n+\t\trd->u.max = (rd->u.max * rs->u.max) & msk;\n+\t/* check for overflow */\n+\t} else if (rd->u.max <= msk >> opsz / 2 && rs->u.max <= msk >> opsz) {\n+\t\trd->u.max *= rs->u.max;\n+\t\trd->u.min *= rd->u.min;\n+\t} else\n+\t\teval_umax_bound(rd, msk);\n+\n+\t/* both operands are constants */\n+\tif (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {\n+\t\trd->s.min = (rd->s.min * rs->s.min) & msk;\n+\t\trd->s.max = (rd->s.max * rs->s.max) & msk;\n+\t/* check that both operands are positive and no overflow */\n+\t} else if (rd->s.min >= 0 && rs->s.min >= 0) {\n+\t\trd->s.max *= rs->s.max;\n+\t\trd->s.min *= rd->s.min;\n+\t} else\n+\t\teval_smax_bound(rd, msk);\n+}\n+\n static const char *\n-eval_store(struct bpf_verifier *bvf, const struct ebpf_insn *ins)\n+eval_divmod(uint32_t op, struct bpf_reg_val *rd, struct bpf_reg_val *rs,\n+\tsize_t opsz, uint64_t msk)\n {\n-\tif (ins->dst_reg == EBPF_REG_10)\n-\t\treturn eval_stack(bvf, ins);\n+\t/* both operands are constants */\n+\tif (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {\n+\t\tif (rs->u.max == 0)\n+\t\t\treturn \"division by 0\";\n+\t\tif (op == BPF_DIV) {\n+\t\t\trd->u.min /= rs->u.min;\n+\t\t\trd->u.max /= rs->u.max;\n+\t\t} else {\n+\t\t\trd->u.min %= rs->u.min;\n+\t\t\trd->u.max %= rs->u.max;\n+\t\t}\n+\t} else {\n+\t\tif (op == BPF_MOD)\n+\t\t\trd->u.max = RTE_MIN(rd->u.max, rs->u.max - 1);\n+\t\telse\n+\t\t\trd->u.max = rd->u.max;\n+\t\trd->u.min = 0;\n+\t}\n+\n+\t/* if we have 32-bit values - extend them to 64-bit */\n+\tif (opsz == sizeof(uint32_t) * CHAR_BIT) {\n+\t\trd->s.min = (int32_t)rd->s.min;\n+\t\trd->s.max = (int32_t)rd->s.max;\n+\t\trs->s.min = (int32_t)rs->s.min;\n+\t\trs->s.max = (int32_t)rs->s.max;\n+\t}\n+\n+\t/* both operands are constants */\n+\tif (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {\n+\t\tif (rs->s.max == 0)\n+\t\t\treturn \"division by 0\";\n+\t\tif (op == BPF_DIV) {\n+\t\t\trd->s.min /= rs->s.min;\n+\t\t\trd->s.max /= rs->s.max;\n+\t\t} else {\n+\t\t\trd->s.min %= rs->s.min;\n+\t\t\trd->s.max %= rs->s.max;\n+\t\t}\n+\t} else if (op == BPF_MOD) {\n+\t\trd->s.min = RTE_MAX(rd->s.max, 0);\n+\t\trd->s.min = RTE_MIN(rd->s.min, 0);\n+\t} else\n+\t\teval_smax_bound(rd, msk);\n+\n+\trd->s.max &= msk;\n+\trd->s.min &= msk;\n+\n \treturn NULL;\n }\n \n+static void\n+eval_neg(struct bpf_reg_val *rd, size_t opsz, uint64_t msk)\n+{\n+\tuint64_t ux, uy;\n+\tint64_t sx, sy;\n+\n+\t/* if we have 32-bit values - extend them to 64-bit */\n+\tif (opsz == sizeof(uint32_t) * CHAR_BIT) {\n+\t\trd->u.min = (int32_t)rd->u.min;\n+\t\trd->u.max = (int32_t)rd->u.max;\n+\t}\n+\n+\tux = -(int64_t)rd->u.min & msk;\n+\tuy = -(int64_t)rd->u.max & msk;\n+\n+\trd->u.max = RTE_MAX(ux, uy);\n+\trd->u.min = RTE_MIN(ux, uy);\n+\n+\t/* if we have 32-bit values - extend them to 
64-bit */\n+\tif (opsz == sizeof(uint32_t) * CHAR_BIT) {\n+\t\trd->s.min = (int32_t)rd->s.min;\n+\t\trd->s.max = (int32_t)rd->s.max;\n+\t}\n+\n+\tsx = -rd->s.min & msk;\n+\tsy = -rd->s.max & msk;\n+\n+\trd->s.max = RTE_MAX(sx, sy);\n+\trd->s.min = RTE_MIN(sx, sy);\n+}\n+\n+/*\n+ * check that destination and source operand are in defined state.\n+ */\n+static const char *\n+eval_defined(const struct bpf_reg_val *dst, const struct bpf_reg_val *src)\n+{\n+\tif (dst != NULL && dst->v.type == RTE_BPF_ARG_UNDEF)\n+\t\treturn \"dest reg value is undefined\";\n+\tif (src != NULL && src->v.type == RTE_BPF_ARG_UNDEF)\n+\t\treturn \"src reg value is undefined\";\n+\treturn NULL;\n+}\n+\n+static const char *\n+eval_alu(struct bpf_verifier *bvf, const struct ebpf_insn *ins)\n+{\n+\tuint64_t msk;\n+\tuint32_t op;\n+\tsize_t opsz;\n+\tconst char *err;\n+\tstruct bpf_eval_state *st;\n+\tstruct bpf_reg_val *rd, rs;\n+\n+\topsz = (BPF_CLASS(ins->code) == BPF_ALU) ?\n+\t\tsizeof(uint32_t) : sizeof(uint64_t);\n+\topsz = opsz * CHAR_BIT;\n+\tmsk = RTE_LEN2MASK(opsz, uint64_t);\n+\n+\tst = bvf->evst;\n+\trd = st->rv + ins->dst_reg;\n+\n+\tif (BPF_SRC(ins->code) == BPF_X) {\n+\t\trs = st->rv[ins->src_reg];\n+\t\teval_apply_mask(&rs, msk);\n+\t} else\n+\t\teval_fill_imm(&rs, msk, ins->imm);\n+\n+\teval_apply_mask(rd, msk);\n+\n+\top = BPF_OP(ins->code);\n+\n+\terr = eval_defined((op != EBPF_MOV) ? rd : NULL,\n+\t\t\t(op != BPF_NEG) ? &rs : NULL);\n+\tif (err != NULL)\n+\t\treturn err;\n+\n+\tif (op == BPF_ADD)\n+\t\teval_add(rd, &rs, msk);\n+\telse if (op == BPF_SUB)\n+\t\teval_sub(rd, &rs, msk);\n+\telse if (op == BPF_LSH)\n+\t\teval_lsh(rd, &rs, opsz, msk);\n+\telse if (op == BPF_RSH)\n+\t\teval_rsh(rd, &rs, opsz, msk);\n+\telse if (op == EBPF_ARSH)\n+\t\teval_arsh(rd, &rs, opsz, msk);\n+\telse if (op == BPF_AND)\n+\t\teval_and(rd, &rs, opsz, msk);\n+\telse if (op == BPF_OR)\n+\t\teval_or(rd, &rs, opsz, msk);\n+\telse if (op == BPF_XOR)\n+\t\teval_xor(rd, &rs, opsz, msk);\n+\telse if (op == BPF_MUL)\n+\t\teval_mul(rd, &rs, opsz, msk);\n+\telse if (op == BPF_DIV || op == BPF_MOD)\n+\t\terr = eval_divmod(op, rd, &rs, opsz, msk);\n+\telse if (op == BPF_NEG)\n+\t\teval_neg(rd, opsz, msk);\n+\telse if (op == EBPF_MOV)\n+\t\t*rd = rs;\n+\telse\n+\t\teval_max_bound(rd, msk);\n+\n+\treturn err;\n+}\n+\n+static const char *\n+eval_bele(struct bpf_verifier *bvf, const struct ebpf_insn *ins)\n+{\n+\tuint64_t msk;\n+\tstruct bpf_eval_state *st;\n+\tstruct bpf_reg_val *rd;\n+\tconst char *err;\n+\n+\tmsk = RTE_LEN2MASK(ins->imm, uint64_t);\n+\n+\tst = bvf->evst;\n+\trd = st->rv + ins->dst_reg;\n+\n+\terr = eval_defined(rd, NULL);\n+\tif (err != NULL)\n+\t\treturn err;\n+\n+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN\n+\tif (ins->code == (BPF_ALU | EBPF_END | EBPF_TO_BE))\n+\t\teval_max_bound(rd, msk);\n+\telse\n+\t\teval_apply_mask(rd, msk);\n+#else\n+\tif (ins->code == (BPF_ALU | EBPF_END | EBPF_TO_LE))\n+\t\teval_max_bound(rd, msk);\n+\telse\n+\t\teval_apply_mask(rd, msk);\n+#endif\n+\n+\treturn NULL;\n+}\n+\n+static const char *\n+eval_ptr(struct bpf_verifier *bvf, struct bpf_reg_val *rm, uint32_t opsz,\n+\tuint32_t align, int16_t off)\n+{\n+\tstruct bpf_reg_val rv;\n+\n+\t/* calculate reg + offset */\n+\teval_fill_imm(&rv, rm->mask, off);\n+\teval_add(rm, &rv, rm->mask);\n+\n+\tif (RTE_BPF_ARG_PTR_TYPE(rm->v.type) == 0)\n+\t\treturn \"destination is not a pointer\";\n+\n+\tif (rm->mask != UINT64_MAX)\n+\t\treturn \"pointer truncation\";\n+\n+\tif (rm->u.max + opsz > rm->v.size ||\n+\t\t\t(uint64_t)rm->s.max + opsz > 
rm->v.size ||\n+\t\t\trm->s.min < 0)\n+\t\treturn \"memory boundary violation\";\n+\n+\tif (rm->u.max % align !=  0)\n+\t\treturn \"unaligned memory access\";\n+\n+\tif (rm->v.type == RTE_BPF_ARG_PTR_STACK) {\n+\n+\t\tif (rm->u.max != rm->u.min || rm->s.max != rm->s.min ||\n+\t\t\t\trm->u.max != (uint64_t)rm->s.max)\n+\t\t\treturn \"stack access with variable offset\";\n+\n+\t\tbvf->stack_sz = RTE_MAX(bvf->stack_sz, rm->v.size - rm->u.max);\n+\n+\t/* pointer to mbuf */\n+\t} else if (rm->v.type == RTE_BPF_ARG_PTR_MBUF) {\n+\n+\t\tif (rm->u.max != rm->u.min || rm->s.max != rm->s.min ||\n+\t\t\t\trm->u.max != (uint64_t)rm->s.max)\n+\t\t\treturn \"mbuf access with variable offset\";\n+\t}\n+\n+\treturn NULL;\n+}\n+\n+static void\n+eval_max_load(struct bpf_reg_val *rv, uint64_t mask)\n+{\n+\teval_umax_bound(rv, mask);\n+\n+\t/* full 64-bit load */\n+\tif (mask == UINT64_MAX)\n+\t\teval_smax_bound(rv, mask);\n+\n+\t/* zero-extend load */\n+\trv->s.min = rv->u.min;\n+\trv->s.max = rv->u.max;\n+}\n+\n+\n static const char *\n eval_load(struct bpf_verifier *bvf, const struct ebpf_insn *ins)\n {\n-\tif (ins->src_reg == EBPF_REG_10)\n-\t\treturn eval_stack(bvf, ins);\n+\tuint32_t opsz;\n+\tuint64_t msk;\n+\tconst char *err;\n+\tstruct bpf_eval_state *st;\n+\tstruct bpf_reg_val *rd, rs;\n+\tconst struct bpf_reg_val *sv;\n+\n+\tst = bvf->evst;\n+\trd = st->rv + ins->dst_reg;\n+\trs = st->rv[ins->src_reg];\n+\topsz = bpf_size(BPF_SIZE(ins->code));\n+\tmsk = RTE_LEN2MASK(opsz * CHAR_BIT, uint64_t);\n+\n+\terr = eval_ptr(bvf, &rs, opsz, 1, ins->off);\n+\tif (err != NULL)\n+\t\treturn err;\n+\n+\tif (rs.v.type == RTE_BPF_ARG_PTR_STACK) {\n+\n+\t\tsv = st->sv + rs.u.max / sizeof(uint64_t);\n+\t\tif (sv->v.type == RTE_BPF_ARG_UNDEF || sv->mask < msk)\n+\t\t\treturn \"undefined value on the stack\";\n+\n+\t\t*rd = *sv;\n+\n+\t/* pointer to mbuf */\n+\t} else if (rs.v.type == RTE_BPF_ARG_PTR_MBUF) {\n+\n+\t\tif (rs.u.max == offsetof(struct rte_mbuf, next)) {\n+\t\t\teval_fill_imm(rd, msk, 0);\n+\t\t\trd->v = rs.v;\n+\t\t} else if (rs.u.max == offsetof(struct rte_mbuf, buf_addr)) {\n+\t\t\teval_fill_imm(rd, msk, 0);\n+\t\t\trd->v.type = RTE_BPF_ARG_PTR;\n+\t\t\trd->v.size = rs.v.buf_size;\n+\t\t} else if (rs.u.max == offsetof(struct rte_mbuf, data_off)) {\n+\t\t\teval_fill_imm(rd, msk, RTE_PKTMBUF_HEADROOM);\n+\t\t\trd->v.type = RTE_BPF_ARG_RAW;\n+\t\t} else {\n+\t\t\teval_max_load(rd, msk);\n+\t\t\trd->v.type = RTE_BPF_ARG_RAW;\n+\t\t}\n+\n+\t/* pointer to raw data */\n+\t} else {\n+\t\teval_max_load(rd, msk);\n+\t\trd->v.type = RTE_BPF_ARG_RAW;\n+\t}\n+\n \treturn NULL;\n }\n \n static const char *\n+eval_mbuf_store(const struct bpf_reg_val *rv, uint32_t opsz)\n+{\n+\tuint32_t i;\n+\n+\tstatic const struct {\n+\t\tsize_t off;\n+\t\tsize_t sz;\n+\t} mbuf_ro_fileds[] = {\n+\t\t{ .off = offsetof(struct rte_mbuf, buf_addr), },\n+\t\t{ .off = offsetof(struct rte_mbuf, refcnt), },\n+\t\t{ .off = offsetof(struct rte_mbuf, nb_segs), },\n+\t\t{ .off = offsetof(struct rte_mbuf, buf_len), },\n+\t\t{ .off = offsetof(struct rte_mbuf, pool), },\n+\t\t{ .off = offsetof(struct rte_mbuf, next), },\n+\t\t{ .off = offsetof(struct rte_mbuf, priv_size), },\n+\t};\n+\n+\tfor (i = 0; i != RTE_DIM(mbuf_ro_fileds) &&\n+\t\t\t(mbuf_ro_fileds[i].off + mbuf_ro_fileds[i].sz <=\n+\t\t\trv->u.max || rv->u.max + opsz <= mbuf_ro_fileds[i].off);\n+\t\t\ti++)\n+\t\t;\n+\n+\tif (i != RTE_DIM(mbuf_ro_fileds))\n+\t\treturn \"store to the read-only mbuf field\";\n+\n+\treturn NULL;\n+\n+}\n+\n+static const char *\n+eval_store(struct bpf_verifier 
*bvf, const struct ebpf_insn *ins)\n+{\n+\tuint32_t opsz;\n+\tuint64_t msk;\n+\tconst char *err;\n+\tstruct bpf_eval_state *st;\n+\tstruct bpf_reg_val rd, rs, *sv;\n+\n+\topsz = bpf_size(BPF_SIZE(ins->code));\n+\tmsk = RTE_LEN2MASK(opsz * CHAR_BIT, uint64_t);\n+\n+\tst = bvf->evst;\n+\trd = st->rv[ins->dst_reg];\n+\n+\tif (BPF_CLASS(ins->code) == BPF_STX) {\n+\t\trs = st->rv[ins->src_reg];\n+\t\teval_apply_mask(&rs, msk);\n+\t} else\n+\t\teval_fill_imm(&rs, msk, ins->imm);\n+\n+\terr = eval_defined(NULL, &rs);\n+\tif (err != NULL)\n+\t\treturn err;\n+\n+\terr = eval_ptr(bvf, &rd, opsz, 1, ins->off);\n+\tif (err != NULL)\n+\t\treturn err;\n+\n+\tif (rd.v.type == RTE_BPF_ARG_PTR_STACK) {\n+\n+\t\tsv = st->sv + rd.u.max / sizeof(uint64_t);\n+\t\tif (BPF_CLASS(ins->code) == BPF_STX &&\n+\t\t\t\tBPF_MODE(ins->code) == EBPF_XADD)\n+\t\t\teval_max_bound(sv, msk);\n+\t\telse\n+\t\t\t*sv = rs;\n+\n+\t/* pointer to mbuf */\n+\t} else if (rd.v.type == RTE_BPF_ARG_PTR_MBUF) {\n+\t\terr = eval_mbuf_store(&rd, opsz);\n+\t\tif (err != NULL)\n+\t\t\treturn err;\n+\t}\n+\n+\treturn NULL;\n+}\n+\n+static const char *\n+eval_func_arg(struct bpf_verifier *bvf, const struct rte_bpf_arg *arg,\n+\tstruct bpf_reg_val *rv)\n+{\n+\tuint32_t i, n;\n+\tstruct bpf_eval_state *st;\n+\tconst char *err;\n+\n+\tst = bvf->evst;\n+\n+\tif (rv->v.type == RTE_BPF_ARG_UNDEF)\n+\t\treturn \"Undefined argument type\";\n+\n+\tif (arg->type != rv->v.type &&\n+\t\t\targ->type != RTE_BPF_ARG_RAW &&\n+\t\t\t(arg->type != RTE_BPF_ARG_PTR ||\n+\t\t\tRTE_BPF_ARG_PTR_TYPE(rv->v.type) == 0))\n+\t\treturn \"Invalid argument type\";\n+\n+\terr = NULL;\n+\n+\t/* argument is a pointer */\n+\tif (RTE_BPF_ARG_PTR_TYPE(arg->type) != 0) {\n+\n+\t\terr = eval_ptr(bvf, rv, arg->size, 1, 0);\n+\n+\t\t/*\n+\t\t * pointer to the variable on the stack is passed\n+\t\t * as an argument, mark stack space it occupies as initialized.\n+\t\t */\n+\t\tif (err == NULL && rv->v.type == RTE_BPF_ARG_PTR_STACK) {\n+\n+\t\t\ti = rv->u.max / sizeof(uint64_t);\n+\t\t\tn = i + arg->size / sizeof(uint64_t);\n+\t\t\twhile (i != n) {\n+\t\t\t\teval_fill_max_bound(st->sv + i, UINT64_MAX);\n+\t\t\t\ti++;\n+\t\t\t};\n+\t\t}\n+\t}\n+\n+\treturn err;\n+}\n+\n+static const char *\n eval_call(struct bpf_verifier *bvf, const struct ebpf_insn *ins)\n {\n-\tuint32_t idx;\n+\tuint64_t msk;\n+\tuint32_t i, idx;\n+\tstruct bpf_reg_val *rv;\n+\tconst struct rte_bpf_xsym *xsym;\n+\tconst char *err;\n \n \tidx = ins->imm;\n \n@@ -145,6 +940,144 @@ eval_call(struct bpf_verifier *bvf, const struct ebpf_insn *ins)\n \t/* for now don't support function calls on 32 bit platform */\n \tif (sizeof(uint64_t) != sizeof(uintptr_t))\n \t\treturn \"function calls are supported only for 64 bit apps\";\n+\n+\txsym = bvf->prm->xsym + idx;\n+\n+\t/* evaluate function arguments */\n+\terr = NULL;\n+\tfor (i = 0; i != xsym->func.nb_args && err == NULL; i++) {\n+\t\terr = eval_func_arg(bvf, xsym->func.args + i,\n+\t\t\tbvf->evst->rv + EBPF_REG_1 + i);\n+\t}\n+\n+\t/* R1-R5 argument/scratch registers */\n+\tfor (i = EBPF_REG_1; i != EBPF_REG_6; i++)\n+\t\tbvf->evst->rv[i].v.type = RTE_BPF_ARG_UNDEF;\n+\n+\t/* update return value */\n+\n+\trv = bvf->evst->rv + EBPF_REG_0;\n+\trv->v = xsym->func.ret;\n+\tmsk = (rv->v.type == RTE_BPF_ARG_RAW) ?\n+\t\tRTE_LEN2MASK(rv->v.size * CHAR_BIT, uint64_t) : UINTPTR_MAX;\n+\teval_max_bound(rv, msk);\n+\trv->mask = msk;\n+\n+\treturn err;\n+}\n+\n+static void\n+eval_jeq_jne(struct bpf_reg_val *trd, struct bpf_reg_val *trs)\n+{\n+\t/* sreg is constant */\n+\tif 
(trs->u.min == trs->u.max) {\n+\t\ttrd->u = trs->u;\n+\t/* dreg is constant */\n+\t} else if (trd->u.min == trd->u.max) {\n+\t\ttrs->u = trd->u;\n+\t} else {\n+\t\ttrd->u.max = RTE_MIN(trd->u.max, trs->u.max);\n+\t\ttrd->u.min = RTE_MAX(trd->u.min, trs->u.min);\n+\t\ttrs->u = trd->u;\n+\t}\n+\n+\t/* sreg is constant */\n+\tif (trs->s.min == trs->s.max) {\n+\t\ttrd->s = trs->s;\n+\t/* dreg is constant */\n+\t} else if (trd->s.min == trd->s.max) {\n+\t\ttrs->s = trd->s;\n+\t} else {\n+\t\ttrd->s.max = RTE_MIN(trd->s.max, trs->s.max);\n+\t\ttrd->s.min = RTE_MAX(trd->s.min, trs->s.min);\n+\t\ttrs->s = trd->s;\n+\t}\n+}\n+\n+static void\n+eval_jgt_jle(struct bpf_reg_val *trd, struct bpf_reg_val *trs,\n+\tstruct bpf_reg_val *frd, struct bpf_reg_val *frs)\n+{\n+\tfrd->u.max = RTE_MIN(frd->u.max, frs->u.min);\n+\ttrd->u.min = RTE_MAX(trd->u.min, trs->u.min + 1);\n+}\n+\n+static void\n+eval_jlt_jge(struct bpf_reg_val *trd, struct bpf_reg_val *trs,\n+\tstruct bpf_reg_val *frd, struct bpf_reg_val *frs)\n+{\n+\tfrd->u.min = RTE_MAX(frd->u.min, frs->u.min);\n+\ttrd->u.max = RTE_MIN(trd->u.max, trs->u.max - 1);\n+}\n+\n+static void\n+eval_jsgt_jsle(struct bpf_reg_val *trd, struct bpf_reg_val *trs,\n+\tstruct bpf_reg_val *frd, struct bpf_reg_val *frs)\n+{\n+\tfrd->s.max = RTE_MIN(frd->s.max, frs->s.min);\n+\ttrd->s.min = RTE_MAX(trd->s.min, trs->s.min + 1);\n+}\n+\n+static void\n+eval_jslt_jsge(struct bpf_reg_val *trd, struct bpf_reg_val *trs,\n+\tstruct bpf_reg_val *frd, struct bpf_reg_val *frs)\n+{\n+\tfrd->s.min = RTE_MAX(frd->s.min, frs->s.min);\n+\ttrd->s.max = RTE_MIN(trd->s.max, trs->s.max - 1);\n+}\n+\n+static const char *\n+eval_jcc(struct bpf_verifier *bvf, const struct ebpf_insn *ins)\n+{\n+\tuint32_t op;\n+\tconst char *err;\n+\tstruct bpf_eval_state *fst, *tst;\n+\tstruct bpf_reg_val *frd, *frs, *trd, *trs;\n+\tstruct bpf_reg_val rvf, rvt;\n+\n+\ttst = bvf->evst;\n+\tfst = bvf->evin->evst;\n+\n+\tfrd = fst->rv + ins->dst_reg;\n+\ttrd = tst->rv + ins->dst_reg;\n+\n+\tif (BPF_SRC(ins->code) == BPF_X) {\n+\t\tfrs = fst->rv + ins->src_reg;\n+\t\ttrs = tst->rv + ins->src_reg;\n+\t} else {\n+\t\tfrs = &rvf;\n+\t\ttrs = &rvt;\n+\t\teval_fill_imm(frs, UINT64_MAX, ins->imm);\n+\t\teval_fill_imm(trs, UINT64_MAX, ins->imm);\n+\t}\n+\n+\terr = eval_defined(trd, trs);\n+\tif (err != NULL)\n+\t\treturn err;\n+\n+\top = BPF_OP(ins->code);\n+\n+\tif (op == BPF_JEQ)\n+\t\teval_jeq_jne(trd, trs);\n+\telse if (op == EBPF_JNE)\n+\t\teval_jeq_jne(frd, frs);\n+\telse if (op == BPF_JGT)\n+\t\teval_jgt_jle(trd, trs, frd, frs);\n+\telse if (op == EBPF_JLE)\n+\t\teval_jgt_jle(frd, frs, trd, trs);\n+\telse if (op == EBPF_JLT)\n+\t\teval_jlt_jge(trd, trs, frd, frs);\n+\telse if (op == BPF_JGE)\n+\t\teval_jlt_jge(frd, frs, trd, trs);\n+\telse if (op == EBPF_JSGT)\n+\t\teval_jsgt_jsle(trd, trs, frd, frs);\n+\telse if (op == EBPF_JSLE)\n+\t\teval_jsgt_jsle(frd, frs, trd, trs);\n+\telse if (op == EBPF_JLT)\n+\t\teval_jslt_jsge(trd, trs, frd, frs);\n+\telse if (op == EBPF_JSGE)\n+\t\teval_jslt_jsge(frd, frs, trd, trs);\n+\n \treturn NULL;\n }\n \n@@ -157,256 +1090,306 @@ static const struct bpf_ins_check ins_chk[UINT8_MAX] = {\n \t\t.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = UINT32_MAX,},\n+\t\t.eval = eval_alu,\n \t},\n \t[(BPF_ALU | BPF_SUB | BPF_K)] = {\n \t\t.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = UINT32_MAX,},\n+\t\t.eval = eval_alu,\n \t},\n \t[(BPF_ALU | BPF_AND | BPF_K)] = 
{\n \t\t.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = UINT32_MAX,},\n+\t\t.eval = eval_alu,\n \t},\n \t[(BPF_ALU | BPF_OR | BPF_K)] = {\n \t\t.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = UINT32_MAX,},\n+\t\t.eval = eval_alu,\n \t},\n \t[(BPF_ALU | BPF_LSH | BPF_K)] = {\n \t\t.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = UINT32_MAX,},\n+\t\t.eval = eval_alu,\n \t},\n \t[(BPF_ALU | BPF_RSH | BPF_K)] = {\n \t\t.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = UINT32_MAX,},\n+\t\t.eval = eval_alu,\n \t},\n \t[(BPF_ALU | BPF_XOR | BPF_K)] = {\n \t\t.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = UINT32_MAX,},\n+\t\t.eval = eval_alu,\n \t},\n \t[(BPF_ALU | BPF_MUL | BPF_K)] = {\n \t\t.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = UINT32_MAX,},\n+\t\t.eval = eval_alu,\n \t},\n \t[(BPF_ALU | EBPF_MOV | BPF_K)] = {\n \t\t.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = UINT32_MAX,},\n+\t\t.eval = eval_alu,\n \t},\n \t[(BPF_ALU | BPF_DIV | BPF_K)] = {\n \t\t.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 1, .max = UINT32_MAX},\n+\t\t.eval = eval_alu,\n \t},\n \t[(BPF_ALU | BPF_MOD | BPF_K)] = {\n \t\t.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 1, .max = UINT32_MAX},\n+\t\t.eval = eval_alu,\n \t},\n \t/* ALU IMM 64-bit instructions */\n \t[(EBPF_ALU64 | BPF_ADD | BPF_K)] = {\n \t\t.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = UINT32_MAX,},\n+\t\t.eval = eval_alu,\n \t},\n \t[(EBPF_ALU64 | BPF_SUB | BPF_K)] = {\n \t\t.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = UINT32_MAX,},\n+\t\t.eval = eval_alu,\n \t},\n \t[(EBPF_ALU64 | BPF_AND | BPF_K)] = {\n \t\t.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = UINT32_MAX,},\n+\t\t.eval = eval_alu,\n \t},\n \t[(EBPF_ALU64 | BPF_OR | BPF_K)] = {\n \t\t.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = UINT32_MAX,},\n+\t\t.eval = eval_alu,\n \t},\n \t[(EBPF_ALU64 | BPF_LSH | BPF_K)] = {\n \t\t.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = UINT32_MAX,},\n+\t\t.eval = eval_alu,\n \t},\n \t[(EBPF_ALU64 | BPF_RSH | BPF_K)] = {\n \t\t.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = UINT32_MAX,},\n+\t\t.eval = eval_alu,\n \t},\n \t[(EBPF_ALU64 | EBPF_ARSH | BPF_K)] = {\n \t\t.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = UINT32_MAX,},\n+\t\t.eval = eval_alu,\n \t},\n \t[(EBPF_ALU64 | BPF_XOR | BPF_K)] = {\n \t\t.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = UINT32_MAX,},\n+\t\t.eval = eval_alu,\n \t},\n \t[(EBPF_ALU64 | BPF_MUL | BPF_K)] = {\n \t\t.mask = {.dreg = WRT_REGS, 
.sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = UINT32_MAX,},\n+\t\t.eval = eval_alu,\n \t},\n \t[(EBPF_ALU64 | EBPF_MOV | BPF_K)] = {\n \t\t.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = UINT32_MAX,},\n+\t\t.eval = eval_alu,\n \t},\n \t[(EBPF_ALU64 | BPF_DIV | BPF_K)] = {\n \t\t.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 1, .max = UINT32_MAX},\n+\t\t.eval = eval_alu,\n \t},\n \t[(EBPF_ALU64 | BPF_MOD | BPF_K)] = {\n \t\t.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 1, .max = UINT32_MAX},\n+\t\t.eval = eval_alu,\n \t},\n \t/* ALU REG 32-bit instructions */\n \t[(BPF_ALU | BPF_ADD | BPF_X)] = {\n \t\t.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_alu,\n \t},\n \t[(BPF_ALU | BPF_SUB | BPF_X)] = {\n \t\t.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_alu,\n \t},\n \t[(BPF_ALU | BPF_AND | BPF_X)] = {\n \t\t.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_alu,\n \t},\n \t[(BPF_ALU | BPF_OR | BPF_X)] = {\n \t\t.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_alu,\n \t},\n \t[(BPF_ALU | BPF_LSH | BPF_X)] = {\n \t\t.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_alu,\n \t},\n \t[(BPF_ALU | BPF_RSH | BPF_X)] = {\n \t\t.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_alu,\n \t},\n \t[(BPF_ALU | BPF_XOR | BPF_X)] = {\n \t\t.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_alu,\n \t},\n \t[(BPF_ALU | BPF_MUL | BPF_X)] = {\n \t\t.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_alu,\n \t},\n \t[(BPF_ALU | BPF_DIV | BPF_X)] = {\n \t\t.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_alu,\n \t},\n \t[(BPF_ALU | BPF_MOD | BPF_X)] = {\n \t\t.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_alu,\n \t},\n \t[(BPF_ALU | EBPF_MOV | BPF_X)] = {\n \t\t.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_alu,\n \t},\n \t[(BPF_ALU | BPF_NEG)] = {\n \t\t.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_alu,\n \t},\n \t[(BPF_ALU | EBPF_END | EBPF_TO_BE)] = {\n \t\t.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 16, .max = 64},\n \t\t.check = check_alu_bele,\n+\t\t.eval = eval_bele,\n \t},\n \t[(BPF_ALU | EBPF_END | EBPF_TO_LE)] = {\n \t\t.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 16, .max = 64},\n \t\t.check = 
check_alu_bele,\n+\t\t.eval = eval_bele,\n \t},\n \t/* ALU REG 64-bit instructions */\n \t[(EBPF_ALU64 | BPF_ADD | BPF_X)] = {\n \t\t.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_alu,\n \t},\n \t[(EBPF_ALU64 | BPF_SUB | BPF_X)] = {\n \t\t.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_alu,\n \t},\n \t[(EBPF_ALU64 | BPF_AND | BPF_X)] = {\n \t\t.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_alu,\n \t},\n \t[(EBPF_ALU64 | BPF_OR | BPF_X)] = {\n \t\t.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_alu,\n \t},\n \t[(EBPF_ALU64 | BPF_LSH | BPF_X)] = {\n \t\t.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_alu,\n \t},\n \t[(EBPF_ALU64 | BPF_RSH | BPF_X)] = {\n \t\t.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_alu,\n \t},\n \t[(EBPF_ALU64 | EBPF_ARSH | BPF_X)] = {\n \t\t.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_alu,\n \t},\n \t[(EBPF_ALU64 | BPF_XOR | BPF_X)] = {\n \t\t.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_alu,\n \t},\n \t[(EBPF_ALU64 | BPF_MUL | BPF_X)] = {\n \t\t.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_alu,\n \t},\n \t[(EBPF_ALU64 | BPF_DIV | BPF_X)] = {\n \t\t.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_alu,\n \t},\n \t[(EBPF_ALU64 | BPF_MOD | BPF_X)] = {\n \t\t.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_alu,\n \t},\n \t[(EBPF_ALU64 | EBPF_MOV | BPF_X)] = {\n \t\t.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_alu,\n \t},\n \t[(EBPF_ALU64 | BPF_NEG)] = {\n \t\t.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_alu,\n \t},\n \t/* load instructions */\n \t[(BPF_LDX | BPF_MEM | BPF_B)] = {\n@@ -438,6 +1421,7 @@ static const struct bpf_ins_check ins_chk[UINT8_MAX] = {\n \t\t.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = UINT32_MAX},\n+\t\t.eval = eval_ld_imm64,\n \t},\n \t/* store REG instructions */\n \t[(BPF_STX | BPF_MEM | BPF_B)] = {\n@@ -513,92 +1497,110 @@ static const struct bpf_ins_check ins_chk[UINT8_MAX] = {\n \t\t.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = UINT16_MAX},\n \t\t.imm = { .min = 0, .max = UINT32_MAX},\n+\t\t.eval = eval_jcc,\n \t},\n \t[(BPF_JMP | EBPF_JNE | BPF_K)] = {\n \t\t.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = UINT16_MAX},\n \t\t.imm = { .min = 0, .max = UINT32_MAX},\n+\t\t.eval = eval_jcc,\n \t},\n \t[(BPF_JMP | BPF_JGT | BPF_K)] = {\n \t\t.mask = { 
.dreg = ALL_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = UINT16_MAX},\n \t\t.imm = { .min = 0, .max = UINT32_MAX},\n+\t\t.eval = eval_jcc,\n \t},\n \t[(BPF_JMP | EBPF_JLT | BPF_K)] = {\n \t\t.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = UINT16_MAX},\n \t\t.imm = { .min = 0, .max = UINT32_MAX},\n+\t\t.eval = eval_jcc,\n \t},\n \t[(BPF_JMP | BPF_JGE | BPF_K)] = {\n \t\t.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = UINT16_MAX},\n \t\t.imm = { .min = 0, .max = UINT32_MAX},\n+\t\t.eval = eval_jcc,\n \t},\n \t[(BPF_JMP | EBPF_JLE | BPF_K)] = {\n \t\t.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = UINT16_MAX},\n \t\t.imm = { .min = 0, .max = UINT32_MAX},\n+\t\t.eval = eval_jcc,\n \t},\n \t[(BPF_JMP | EBPF_JSGT | BPF_K)] = {\n \t\t.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = UINT16_MAX},\n \t\t.imm = { .min = 0, .max = UINT32_MAX},\n+\t\t.eval = eval_jcc,\n \t},\n \t[(BPF_JMP | EBPF_JSLT | BPF_K)] = {\n \t\t.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = UINT16_MAX},\n \t\t.imm = { .min = 0, .max = UINT32_MAX},\n+\t\t.eval = eval_jcc,\n \t},\n \t[(BPF_JMP | EBPF_JSGE | BPF_K)] = {\n \t\t.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = UINT16_MAX},\n \t\t.imm = { .min = 0, .max = UINT32_MAX},\n+\t\t.eval = eval_jcc,\n \t},\n \t[(BPF_JMP | EBPF_JSLE | BPF_K)] = {\n \t\t.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = UINT16_MAX},\n \t\t.imm = { .min = 0, .max = UINT32_MAX},\n+\t\t.eval = eval_jcc,\n \t},\n \t[(BPF_JMP | BPF_JSET | BPF_K)] = {\n \t\t.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = UINT16_MAX},\n \t\t.imm = { .min = 0, .max = UINT32_MAX},\n+\t\t.eval = eval_jcc,\n \t},\n \t/* jcc REG instructions */\n \t[(BPF_JMP | BPF_JEQ | BPF_X)] = {\n \t\t.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},\n \t\t.off = { .min = 0, .max = UINT16_MAX},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_jcc,\n \t},\n \t[(BPF_JMP | EBPF_JNE | BPF_X)] = {\n \t\t.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},\n \t\t.off = { .min = 0, .max = UINT16_MAX},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_jcc,\n \t},\n \t[(BPF_JMP | BPF_JGT | BPF_X)] = {\n \t\t.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},\n \t\t.off = { .min = 0, .max = UINT16_MAX},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_jcc,\n \t},\n \t[(BPF_JMP | EBPF_JLT | BPF_X)] = {\n \t\t.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},\n \t\t.off = { .min = 0, .max = UINT16_MAX},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_jcc,\n \t},\n \t[(BPF_JMP | BPF_JGE | BPF_X)] = {\n \t\t.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},\n \t\t.off = { .min = 0, .max = UINT16_MAX},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_jcc,\n \t},\n \t[(BPF_JMP | EBPF_JLE | BPF_X)] = {\n \t\t.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},\n \t\t.off = { .min = 0, .max = UINT16_MAX},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_jcc,\n \t},\n \t[(BPF_JMP | EBPF_JSGT | BPF_X)] = {\n \t\t.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},\n \t\t.off = { .min = 0, .max = UINT16_MAX},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_jcc,\n \t},\n \t[(BPF_JMP | EBPF_JSLT | BPF_X)] = {\n \t\t.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},\n@@ -609,16 +1611,19 @@ static const struct bpf_ins_check ins_chk[UINT8_MAX] = {\n \t\t.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},\n 
\t\t.off = { .min = 0, .max = UINT16_MAX},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_jcc,\n \t},\n \t[(BPF_JMP | EBPF_JSLE | BPF_X)] = {\n \t\t.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},\n \t\t.off = { .min = 0, .max = UINT16_MAX},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_jcc,\n \t},\n \t[(BPF_JMP | BPF_JSET | BPF_X)] = {\n \t\t.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},\n \t\t.off = { .min = 0, .max = UINT16_MAX},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_jcc,\n \t},\n \t/* call instruction */\n \t[(BPF_JMP | EBPF_CALL)] = {\n@@ -632,6 +1637,7 @@ static const struct bpf_ins_check ins_chk[UINT8_MAX] = {\n \t\t.mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},\n \t\t.off = { .min = 0, .max = 0},\n \t\t.imm = { .min = 0, .max = 0},\n+\t\t.eval = eval_exit,\n \t},\n };\n \n@@ -1046,7 +2052,7 @@ save_eval_state(struct bpf_verifier *bvf, struct inst_node *node)\n \tst = pull_eval_state(bvf);\n \tif (st == NULL) {\n \t\tRTE_BPF_LOG(ERR,\n-\t\t\t\"%s: internal error (out of space) at pc: %u\",\n+\t\t\t\"%s: internal error (out of space) at pc: %u\\n\",\n \t\t\t__func__, get_node_idx(bvf, node));\n \t\treturn -ENOMEM;\n \t}\n@@ -1078,6 +2084,32 @@ restore_eval_state(struct bpf_verifier *bvf, struct inst_node *node)\n \tpush_eval_state(bvf);\n }\n \n+static void\n+log_eval_state(const struct bpf_verifier *bvf, const struct ebpf_insn *ins,\n+\tuint32_t pc, int32_t loglvl)\n+{\n+\tconst struct bpf_eval_state *st;\n+\tconst struct bpf_reg_val *rv;\n+\n+\trte_log(loglvl, rte_bpf_logtype, \"%s(pc=%u):\\n\", __func__, pc);\n+\n+\tst = bvf->evst;\n+\trv = st->rv + ins->dst_reg;\n+\n+\trte_log(loglvl, rte_bpf_logtype,\n+\t\t\"r%u={\\n\"\n+\t\t\"\\tv={type=%u, size=%zu},\\n\"\n+\t\t\"\\tmask=0x%\" PRIx64 \",\\n\"\n+\t\t\"\\tu={min=0x%\" PRIx64 \", max=0x%\" PRIx64 \"},\\n\"\n+\t\t\"\\ts={min=%\" PRId64 \", max=%\" PRId64 \"},\\n\"\n+\t\t\"};\\n\",\n+\t\tins->dst_reg,\n+\t\trv->v.type, rv->v.size,\n+\t\trv->mask,\n+\t\trv->u.min, rv->u.max,\n+\t\trv->s.min, rv->s.max);\n+}\n+\n /*\n  * Do second pass through CFG and try to evaluate instructions\n  * via each possible path.\n@@ -1096,23 +2128,56 @@ evaluate(struct bpf_verifier *bvf)\n \tconst struct ebpf_insn *ins;\n \tstruct inst_node *next, *node;\n \n-\tnode = bvf->in;\n+\t/* initial state of frame pointer */\n+\tstatic const struct bpf_reg_val rvfp = {\n+\t\t.v = {\n+\t\t\t.type = RTE_BPF_ARG_PTR_STACK,\n+\t\t\t.size = MAX_BPF_STACK_SIZE,\n+\t\t},\n+\t\t.mask = UINT64_MAX,\n+\t\t.u = {.min = MAX_BPF_STACK_SIZE, .max = MAX_BPF_STACK_SIZE},\n+\t\t.s = {.min = MAX_BPF_STACK_SIZE, .max = MAX_BPF_STACK_SIZE},\n+\t};\n+\n+\tbvf->evst->rv[EBPF_REG_1].v = bvf->prm->prog_arg;\n+\tbvf->evst->rv[EBPF_REG_1].mask = UINT64_MAX;\n+\tif (bvf->prm->prog_arg.type == RTE_BPF_ARG_RAW)\n+\t\teval_max_bound(bvf->evst->rv + EBPF_REG_1, UINT64_MAX);\n+\n+\tbvf->evst->rv[EBPF_REG_10] = rvfp;\n+\n \tins = bvf->prm->ins;\n+\tnode = bvf->in;\n+\tnext = node;\n \trc = 0;\n \n \twhile (node != NULL && rc == 0) {\n \n-\t\t/* current node evaluation */\n-\t\tidx = get_node_idx(bvf, node);\n-\t\top = ins[idx].code;\n+\t\t/*\n+\t\t * current node evaluation, make sure we evaluate\n+\t\t * each node only once.\n+\t\t */\n+\t\tif (next != NULL) {\n+\n+\t\t\tbvf->evin = node;\n+\t\t\tidx = get_node_idx(bvf, node);\n+\t\t\top = ins[idx].code;\n \n-\t\tif (ins_chk[op].eval != NULL) {\n-\t\t\terr = ins_chk[op].eval(bvf, ins + idx);\n-\t\t\tif (err != NULL) {\n-\t\t\t\tRTE_BPF_LOG(ERR, \"%s: %s at pc: %u\\n\",\n-\t\t\t\t\t__func__, err, 
idx);\n-\t\t\t\trc = -EINVAL;\n+\t\t\t/* for jcc node make a copy of evaluatoion state */\n+\t\t\tif (node->nb_edge > 1)\n+\t\t\t\trc |= save_eval_state(bvf, node);\n+\n+\t\t\tif (ins_chk[op].eval != NULL && rc == 0) {\n+\t\t\t\terr = ins_chk[op].eval(bvf, ins + idx);\n+\t\t\t\tif (err != NULL) {\n+\t\t\t\t\tRTE_BPF_LOG(ERR, \"%s: %s at pc: %u\\n\",\n+\t\t\t\t\t\t__func__, err, idx);\n+\t\t\t\t\trc = -EINVAL;\n+\t\t\t\t}\n \t\t\t}\n+\n+\t\t\tlog_eval_state(bvf, ins + idx, idx, RTE_LOG_DEBUG);\n+\t\t\tbvf->evin = NULL;\n \t\t}\n \n \t\t/* proceed through CFG */\n@@ -1120,9 +2185,8 @@ evaluate(struct bpf_verifier *bvf)\n \t\tif (next != NULL) {\n \n \t\t\t/* proceed with next child */\n-\t\t\tif (node->cur_edge != node->nb_edge)\n-\t\t\t\trc |= save_eval_state(bvf, node);\n-\t\t\telse if (node->evst != NULL)\n+\t\t\tif (node->cur_edge == node->nb_edge &&\n+\t\t\t\t\tnode->evst != NULL)\n \t\t\t\trestore_eval_state(bvf, node);\n \n \t\t\tnext->prev_node = get_node_idx(bvf, node);\n",
    "prefixes": [
        "dpdk-dev",
        "2/3"
    ]
}
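
Since PUT and PATCH are allowed on this resource, writable fields such as "state" or "delegate" can in principle be changed with an authenticated request. The sketch below is illustrative only: the token-based Authorization header and the chosen state value are assumptions about the server's configuration, not something shown in the payload above.

import requests

# Hypothetical API token; write access depends on credentials set up on the
# Patchwork server and on the account's permissions for this project.
headers = {"Authorization": "Token <your-api-token>"}

resp = requests.patch(
    "http://patchwork.dpdk.org/api/patches/40808/",
    json={"state": "superseded"},   # assumed to be a valid state name
    headers=headers,
)
resp.raise_for_status()
print(resp.json()["state"])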