riscv: bpf: eliminate zero extension code-gen [Linux 5.3]

This Linux kernel change, "riscv: bpf: eliminate zero extension code-gen", is included in the Linux 5.3 release. It was authored by Jiong Wang <jiong.wang@netronome.com> on Fri May 24 23:25:27 2019 +0100. The commit in the Linux stable tree is 66d0d5a.
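For background, this patch hooks the RV64 BPF JIT into the zero-extension elimination pass introduced in the same series: a JIT opts in by implementing bpf_jit_needs_zext() to return true, after which the verifier inserts an explicit zero-extension after each 32-bit sub-register definition that needs one and sets prog->aux->verifier_zext. The JIT can then drop its own unconditional zero-extension sequences (the "!aux->verifier_zext" checks below) and can also fold away the verifier-inserted zext when the preceding RISC-V instruction already zero-extends, as LBU/LHU/LWU do. The inserted zext is encoded as a 32-bit register mov with imm set to 1, which is what the new "Special mov32 for zext" case and the insn_is_zext() checks recognize. A rough sketch of that encoding and the helper, as they appear in the core BPF headers of this series (reproduced from memory, so treat the exact form as an approximation):

    /* include/linux/filter.h (Linux 5.3), approximate form */
    #define BPF_ZEXT_REG(DST)                                   \
        ((struct bpf_insn) {                                    \
            .code    = BPF_ALU | BPF_MOV | BPF_X,               \
            .dst_reg = DST,                                     \
            .src_reg = DST,                                     \
            .off     = 0,                                       \
            .imm     = 1 })    /* imm == 1 marks a zext mov32 */

    static inline bool insn_is_zext(const struct bpf_insn *insn)
    {
        return insn->code == (BPF_ALU | BPF_MOV | BPF_X) && insn->imm == 1;
    }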

riscv: bpf: eliminate zero extension code-gen

Cc: Björn Töpel <bjorn.topel@gmail.com>
Acked-by: Björn Töpel <bjorn.topel@gmail.com>
Tested-by: Björn Töpel <bjorn.topel@gmail.com>
Signed-off-by: Jiong Wang <jiong.wang@netronome.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

This change adds or deletes 43 lines of Linux source code (30 insertions, 13 deletions) in a single file. The code changes to the Linux kernel are as follows.

 arch/riscv/net/bpf_jit_comp.c | 43 ++++++++++++++++++++++++++++++-------------
 1 file changed, 30 insertions(+), 13 deletions(-)

diff --git a/arch/riscv/net/bpf_jit_comp.c b/arch/riscv/net/bpf_jit_comp.c
index 80b12aa..c4c836e 100644
--- a/arch/riscv/net/bpf_jit_comp.c
+++ b/arch/riscv/net/bpf_jit_comp.c
@@ -731,6 +731,7 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
 {
    bool is64 = BPF_CLASS(insn->code) == BPF_ALU64 ||
            BPF_CLASS(insn->code) == BPF_JMP;
+   struct bpf_prog_aux *aux = ctx->prog->aux;
    int rvoff, i = insn - ctx->prog->insnsi;
    u8 rd = -1, rs = -1, code = insn->code;
    s16 off = insn->off;
@@ -742,8 +743,13 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
    /* dst = src */
    case BPF_ALU | BPF_MOV | BPF_X:
    case BPF_ALU64 | BPF_MOV | BPF_X:
+       if (imm == 1) {
+           /* Special mov32 for zext */
+           emit_zext_32(rd, ctx);
+           break;
+       }
        emit(is64 ? rv_addi(rd, rs, 0) : rv_addiw(rd, rs, 0), ctx);
-       if (!is64)
+       if (!is64 && !aux->verifier_zext)
            emit_zext_32(rd, ctx);
        break;

@@ -771,19 +777,19 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
    case BPF_ALU | BPF_MUL | BPF_X:
    case BPF_ALU64 | BPF_MUL | BPF_X:
        emit(is64 ? rv_mul(rd, rd, rs) : rv_mulw(rd, rd, rs), ctx);
-       if (!is64)
+       if (!is64 && !aux->verifier_zext)
            emit_zext_32(rd, ctx);
        break;
    case BPF_ALU | BPF_DIV | BPF_X:
    case BPF_ALU64 | BPF_DIV | BPF_X:
        emit(is64 ? rv_divu(rd, rd, rs) : rv_divuw(rd, rd, rs), ctx);
-       if (!is64)
+       if (!is64 && !aux->verifier_zext)
            emit_zext_32(rd, ctx);
        break;
    case BPF_ALU | BPF_MOD | BPF_X:
    case BPF_ALU64 | BPF_MOD | BPF_X:
        emit(is64 ? rv_remu(rd, rd, rs) : rv_remuw(rd, rd, rs), ctx);
-       if (!is64)
+       if (!is64 && !aux->verifier_zext)
            emit_zext_32(rd, ctx);
        break;
    case BPF_ALU | BPF_LSH | BPF_X:
@@ -867,7 +873,7 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
    case BPF_ALU | BPF_MOV | BPF_K:
    case BPF_ALU64 | BPF_MOV | BPF_K:
        emit_imm(rd, imm, ctx);
-       if (!is64)
+       if (!is64 && !aux->verifier_zext)
            emit_zext_32(rd, ctx);
        break;

@@ -882,7 +888,7 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
            emit(is64 ? rv_add(rd, rd, RV_REG_T1) :
                 rv_addw(rd, rd, RV_REG_T1), ctx);
        }
-       if (!is64)
+       if (!is64 && !aux->verifier_zext)
            emit_zext_32(rd, ctx);
        break;
    case BPF_ALU | BPF_SUB | BPF_K:
@@ -895,7 +901,7 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
            emit(is64 ? rv_sub(rd, rd, RV_REG_T1) :
                 rv_subw(rd, rd, RV_REG_T1), ctx);
        }
-       if (!is64)
+       if (!is64 && !aux->verifier_zext)
            emit_zext_32(rd, ctx);
        break;
    case BPF_ALU | BPF_AND | BPF_K:
@@ -906,7 +912,7 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
            emit_imm(RV_REG_T1, imm, ctx);
            emit(rv_and(rd, rd, RV_REG_T1), ctx);
        }
-       if (!is64)
+       if (!is64 && !aux->verifier_zext)
            emit_zext_32(rd, ctx);
        break;
    case BPF_ALU | BPF_OR | BPF_K:
@@ -917,7 +923,7 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
            emit_imm(RV_REG_T1, imm, ctx);
            emit(rv_or(rd, rd, RV_REG_T1), ctx);
        }
-       if (!is64)
+       if (!is64 && !aux->verifier_zext)
            emit_zext_32(rd, ctx);
        break;
    case BPF_ALU | BPF_XOR | BPF_K:
@@ -928,7 +934,7 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
            emit_imm(RV_REG_T1, imm, ctx);
            emit(rv_xor(rd, rd, RV_REG_T1), ctx);
        }
-       if (!is64)
+       if (!is64 && !aux->verifier_zext)
            emit_zext_32(rd, ctx);
        break;
    case BPF_ALU | BPF_MUL | BPF_K:
@@ -936,7 +942,7 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
        emit_imm(RV_REG_T1, imm, ctx);
        emit(is64 ? rv_mul(rd, rd, RV_REG_T1) :
             rv_mulw(rd, rd, RV_REG_T1), ctx);
-       if (!is64)
+       if (!is64 && !aux->verifier_zext)
            emit_zext_32(rd, ctx);
        break;
    case BPF_ALU | BPF_DIV | BPF_K:
@@ -944,7 +950,7 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
        emit_imm(RV_REG_T1, imm, ctx);
        emit(is64 ? rv_divu(rd, rd, RV_REG_T1) :
             rv_divuw(rd, rd, RV_REG_T1), ctx);
-       if (!is64)
+       if (!is64 && !aux->verifier_zext)
            emit_zext_32(rd, ctx);
        break;
    case BPF_ALU | BPF_MOD | BPF_K:
@@ -952,7 +958,7 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
        emit_imm(RV_REG_T1, imm, ctx);
        emit(is64 ? rv_remu(rd, rd, RV_REG_T1) :
             rv_remuw(rd, rd, RV_REG_T1), ctx);
-       if (!is64)
+       if (!is64 && !aux->verifier_zext)
            emit_zext_32(rd, ctx);
        break;
    case BPF_ALU | BPF_LSH | BPF_K:
@@ -1239,6 +1245,8 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
        emit_imm(RV_REG_T1, off, ctx);
        emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx);
        emit(rv_lbu(rd, 0, RV_REG_T1), ctx);
+       if (insn_is_zext(&insn[1]))
+           return 1;
        break;
    case BPF_LDX | BPF_MEM | BPF_H:
        if (is_12b_int(off)) {
@@ -1249,6 +1257,8 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
        emit_imm(RV_REG_T1, off, ctx);
        emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx);
        emit(rv_lhu(rd, 0, RV_REG_T1), ctx);
+       if (insn_is_zext(&insn[1]))
+           return 1;
        break;
    case BPF_LDX | BPF_MEM | BPF_W:
        if (is_12b_int(off)) {
@@ -1259,6 +1269,8 @@ static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
        emit_imm(RV_REG_T1, off, ctx);
        emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx);
        emit(rv_lwu(rd, 0, RV_REG_T1), ctx);
+       if (insn_is_zext(&insn[1]))
+           return 1;
        break;
    case BPF_LDX | BPF_MEM | BPF_DW:
        if (is_12b_int(off)) {
@@ -1503,6 +1515,11 @@ static void bpf_flush_icache(void *start, void *end)
    flush_icache_range((unsigned long)start, (unsigned long)end);
 }

+bool bpf_jit_needs_zext(void)
+{
+   return true;
+}
+
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
    bool tmp_blinded = false, extra_pass = false;
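
The three "return 1" sites in the load cases rely on the JIT's main loop treating a positive return value from emit_insn() as "the next BPF instruction has already been handled and can be skipped": RV64 LBU/LHU/LWU already zero-extend the destination register, so the verifier-inserted zext that follows such a load is redundant. A simplified sketch of how the build loop consumes that return value (illustrative only, not the verbatim kernel code, which also tracks per-instruction offsets):

    /* Simplified: iterate over the BPF program and JIT each instruction.
     * A positive return from emit_insn() means the following instruction
     * (a verifier-inserted zero-extension) was folded into this one, so
     * the loop advances past it without emitting anything for it.
     */
    for (i = 0; i < prog->len; i++) {
        const struct bpf_insn *insn = &prog->insnsi[i];
        int ret = emit_insn(insn, ctx, extra_pass);

        if (ret > 0) {  /* skip the already-handled zext instruction */
            i++;
            continue;
        }
        if (ret < 0)
            return ret;
    }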
