/*
 * bpf_jit_comp64.c: eBPF JIT compiler
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>

#include "bpf_jit64.h"

int bpf_jit_enable __read_mostly;
static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
{
	int *p = area;

	/* Fill whole space with trap instructions */
	while (p < (int *)((char *)area + size))
		*p++ = BREAKPOINT_INSTRUCTION;
}
static inline void bpf_flush_icache(void *start, void *end)
{
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
}
static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
{
	return (ctx->seen & (1 << (31 - b2p[i])));
}

static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
{
	ctx->seen |= (1 << (31 - b2p[i]));
}
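/*
 * Note: ctx->seen tracks usage of PPC GPR 'n' via bit (31 - n). Since the
 * b2p mapping (bpf_jit64.h) hands out non-volatile registers from the high
 * end of the GPR file, these bits land in the low end of the word, away
 * from feature flags such as SEEN_FUNC and SEEN_SKB.
 */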
static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
	/*
	 * We only need a stack frame if:
	 * - we call other functions (kernel helpers), or
	 * - the bpf program uses its stack area
	 * The latter condition is deduced from the usage of BPF_REG_FP
	 */
	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, BPF_REG_FP);
}
/*
 * When not setting up our own stackframe, the redzone usage is:
 *
 *		[	prev sp		] <-------------
 *		[	  ...		]		|
 * sp (r1) --->	[    stack pointer	] --------------
 *		[   nv gpr save area	] 8*8		|
 *		[    tail_call_cnt	] 8		|
 *		[    local_tmp_var	] 8		|
 *		[   unused red zone	] 208 bytes protected
 */
static int bpf_jit_stack_local(struct codegen_context *ctx)
{
	if (bpf_has_stack_frame(ctx))
		return STACK_FRAME_MIN_SIZE + MAX_BPF_STACK;
	else
		return -(BPF_PPC_STACK_SAVE + 16);
}
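/*
 * Note: the ppc64 ELF ABIs guarantee a 288-byte red zone below the stack
 * pointer that a function may use without allocating a frame. The layout
 * above consumes 8*8 + 8 + 8 = 80 bytes of it, which is where the
 * "208 bytes protected" figure comes from.
 */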
static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
		return (bpf_has_stack_frame(ctx) ? BPF_PPC_STACKFRAME : 0)
							- (8 * (32 - reg));

	pr_err("BPF JIT is asking about unknown registers");
	BUG();
}
static void bpf_jit_emit_skb_loads(u32 *image, struct codegen_context *ctx)
{
	/*
	 * Load skb->len and skb->data_len
	 * r3 points to skb
	 */
	PPC_LWZ(b2p[SKB_HLEN_REG], 3, offsetof(struct sk_buff, len));
	PPC_LWZ(b2p[TMP_REG_1], 3, offsetof(struct sk_buff, data_len));
	/* header_len = len - data_len */
	PPC_SUB(b2p[SKB_HLEN_REG], b2p[SKB_HLEN_REG], b2p[TMP_REG_1]);

	/* skb->data pointer */
	PPC_BPF_LL(b2p[SKB_DATA_REG], 3, offsetof(struct sk_buff, data));
}
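/*
 * SKB_HLEN_REG now holds skb->len - skb->data_len, i.e. skb_headlen():
 * the number of bytes present in the linear buffer at skb->data. The
 * LD_ABS/IND helpers use it to decide whether a packet load can be
 * served directly through the cached SKB_DATA_REG pointer or needs the
 * slow path.
 */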
static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func)
{
#ifdef PPC64_ELF_ABI_v1
	/* func points to the function descriptor */
	PPC_LI64(b2p[TMP_REG_2], func);
	/* Load actual entry point from function descriptor */
	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
	/* ... and move it to LR */
	PPC_MTLR(b2p[TMP_REG_1]);
	/*
	 * Load TOC from function descriptor at offset 8.
	 * We can clobber r2 since we get called through a
	 * function pointer (so caller will save/restore r2)
	 * and since we don't use a TOC ourself.
	 */
	PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
#else
	/* We can clobber r12 */
	PPC_FUNC_ADDR(12, func);
	PPC_MTLR(12);
#endif
	PPC_BLRL();
}
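/*
 * On ELFv1, a function symbol resolves to a descriptor of {entry, TOC},
 * hence the two loads above. ELFv2 has no descriptors, but the ABI expects
 * r12 to hold the entry address at the point of call so the callee can
 * derive its TOC from it -- which is why the address is materialized into
 * r12 rather than into a temporary.
 */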
static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

	if (bpf_has_stack_frame(ctx)) {
		/*
		 * We need a stack frame, but we don't necessarily need to
		 * save/restore LR unless we call other functions
		 */
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
		}

		PPC_BPF_STLU(1, 1, -BPF_PPC_STACKFRAME);
	}

	/*
	 * Back up non-volatile regs -- BPF registers 6-10
	 * If we haven't created our own stack frame, we save these
	 * in the protected zone below the previous stack frame
	 */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, i))
			PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/*
	 * Save additional non-volatile regs if we cache skb
	 * Also, setup skb data
	 */
	if (ctx->seen & SEEN_SKB) {
		PPC_BPF_STL(b2p[SKB_HLEN_REG], 1,
				bpf_jit_stack_offsetof(ctx, b2p[SKB_HLEN_REG]));
		PPC_BPF_STL(b2p[SKB_DATA_REG], 1,
				bpf_jit_stack_offsetof(ctx, b2p[SKB_DATA_REG]));
		bpf_jit_emit_skb_loads(image, ctx);
	}

	/* Setup frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, BPF_REG_FP))
		PPC_ADDI(b2p[BPF_REG_FP], 1,
				STACK_FRAME_MIN_SIZE + MAX_BPF_STACK);
}
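/*
 * Note: the BPF stack area (MAX_BPF_STACK, 512 bytes) sits just above the
 * minimal frame header, so BPF_REG_FP ends up pointing at the top of that
 * area; eBPF programs address their stack with negative offsets from FP.
 */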
static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Move result to r3 */
	PPC_MR(3, b2p[BPF_REG_0]);

	/* Restore NVRs */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, i))
			PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/* Restore non-volatile registers used for skb cache */
	if (ctx->seen & SEEN_SKB) {
		PPC_BPF_LL(b2p[SKB_HLEN_REG], 1,
				bpf_jit_stack_offsetof(ctx, b2p[SKB_HLEN_REG]));
		PPC_BPF_LL(b2p[SKB_DATA_REG], 1,
				bpf_jit_stack_offsetof(ctx, b2p[SKB_DATA_REG]));
	}

	/* Tear down our stack frame */
	if (bpf_has_stack_frame(ctx)) {
		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
		if (ctx->seen & SEEN_FUNC) {
			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
			PPC_MTLR(0);
		}
	}

	PPC_BLR();
}
/* Assemble the body code between the prologue & epilogue */
static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
			      struct codegen_context *ctx,
			      u32 *addrs)
{
	const struct bpf_insn *insn = fp->insnsi;
	int flen = fp->len;
	int i;

	/* Start of epilogue code - will only be valid 2nd pass onwards */
	u32 exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		u32 code = insn[i].code;
		u32 dst_reg = b2p[insn[i].dst_reg];
		u32 src_reg = b2p[insn[i].src_reg];
		s16 off = insn[i].off;
		s32 imm = insn[i].imm;
		u64 imm64;
		u8 *func;
		u32 true_cond;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;
		/*
		 * As an optimization, we note down which non-volatile registers
		 * are used so that we can only save/restore those in our
		 * prologue and epilogue. We do this here regardless of whether
		 * the actual BPF instruction uses src/dst registers or not
		 * (for instance, BPF_CALL does not use them). The expectation
		 * is that those instructions will have src_reg/dst_reg set to
		 * 0. Even otherwise, we just lose some prologue/epilogue
		 * optimization but everything else should work without
		 * any issues.
		 */
		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
			bpf_set_seen_register(ctx, insn[i].dst_reg);
		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
			bpf_set_seen_register(ctx, insn[i].src_reg);

		switch (code) {
		/*
		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
		 */
		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
			PPC_ADD(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
			PPC_SUB(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
			if (BPF_OP(code) == BPF_SUB)
				imm = -imm;
			if (imm) {
				if (imm >= -32768 && imm < 32768)
					PPC_ADDI(dst_reg, dst_reg, IMM_L(imm));
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]);
				}
			}
			goto bpf_alu32_trunc;
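		/*
		 * Note: addi takes a sign-extended 16-bit immediate, hence
		 * the [-32768, 32768) checks; anything wider is first
		 * materialized into a temporary via PPC_LI32 (bpf_jit.h),
		 * which expands to a single li, or to lis followed by ori.
		 */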
		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
			if (BPF_CLASS(code) == BPF_ALU)
				PPC_MULW(dst_reg, dst_reg, src_reg);
			else
				PPC_MULD(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
			if (imm >= -32768 && imm < 32768)
				PPC_MULI(dst_reg, dst_reg, IMM_L(imm));
			else {
				PPC_LI32(b2p[TMP_REG_1], imm);
				if (BPF_CLASS(code) == BPF_ALU)
					PPC_MULW(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				else
					PPC_MULD(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
			PPC_CMPWI(src_reg, 0);
			PPC_BCC_SHORT(COND_NE, (ctx->idx * 4) + 12);
			PPC_LI(b2p[BPF_REG_0], 0);
			PPC_JMP(exit_addr);
			if (BPF_OP(code) == BPF_MOD) {
				PPC_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg);
				PPC_MULW(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]);
				PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else
				PPC_DIVWU(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
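		/*
		 * eBPF semantics: division by zero terminates the program
		 * with a return value of 0, hence the guard above -- the bne
		 * skips the two-instruction "li reg, 0; branch to epilogue"
		 * sequence. Modulo is open-coded as dst - (dst / src) * src,
		 * since POWER processors before ISA v3.0 have no integer
		 * remainder instruction.
		 */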
		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
			PPC_CMPDI(src_reg, 0);
			PPC_BCC_SHORT(COND_NE, (ctx->idx * 4) + 12);
			PPC_LI(b2p[BPF_REG_0], 0);
			PPC_JMP(exit_addr);
			if (BPF_OP(code) == BPF_MOD) {
				PPC_DIVD(b2p[TMP_REG_1], dst_reg, src_reg);
				PPC_MULD(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]);
				PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else
				PPC_DIVD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
			if (imm == 0)
				return -EINVAL;
			else if (imm == 1)
				goto bpf_alu32_trunc;

			PPC_LI32(b2p[TMP_REG_1], imm);
			switch (BPF_CLASS(code)) {
			case BPF_ALU:
				if (BPF_OP(code) == BPF_MOD) {
					PPC_DIVWU(b2p[TMP_REG_2], dst_reg,
							b2p[TMP_REG_1]);
					PPC_MULW(b2p[TMP_REG_1],
							b2p[TMP_REG_1],
							b2p[TMP_REG_2]);
					PPC_SUB(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				} else
					PPC_DIVWU(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				break;
			case BPF_ALU64:
				if (BPF_OP(code) == BPF_MOD) {
					PPC_DIVD(b2p[TMP_REG_2], dst_reg,
							b2p[TMP_REG_1]);
					PPC_MULD(b2p[TMP_REG_1],
							b2p[TMP_REG_1],
							b2p[TMP_REG_2]);
					PPC_SUB(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				} else
					PPC_DIVD(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				break;
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
			PPC_NEG(dst_reg, dst_reg);
			goto bpf_alu32_trunc;
		/*
		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
		 */
		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
			PPC_AND(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
			if (!IMM_H(imm))
				PPC_ANDI(dst_reg, dst_reg, IMM_L(imm));
			else {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_AND(dst_reg, dst_reg, b2p[TMP_REG_1]);
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
			PPC_OR(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
		case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_OR(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else {
				if (IMM_L(imm))
					PPC_ORI(dst_reg, dst_reg, IMM_L(imm));
				if (IMM_H(imm))
					PPC_ORIS(dst_reg, dst_reg, IMM_H(imm));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
			PPC_XOR(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else {
				if (IMM_L(imm))
					PPC_XORI(dst_reg, dst_reg, IMM_L(imm));
				if (IMM_H(imm))
					PPC_XORIS(dst_reg, dst_reg, IMM_H(imm));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
			/* slw clears top 32 bits */
			PPC_SLW(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
			PPC_SLD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
			/* with imm 0, we still need to clear top 32 bits */
			PPC_SLWI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
			if (imm != 0)
				PPC_SLDI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
			PPC_SRW(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
			PPC_SRD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
			PPC_SRWI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
			if (imm != 0)
				PPC_SRDI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
			PPC_SRAD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
			if (imm != 0)
				PPC_SRADI(dst_reg, dst_reg, imm);
			break;
		/*
		 * MOV
		 */
		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
			PPC_MR(dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
			PPC_LI32(dst_reg, imm);
			goto bpf_alu32_trunc;
bpf_alu32_trunc:
		/* Truncate to 32-bits */
		if (BPF_CLASS(code) == BPF_ALU)
			PPC_RLWINM(dst_reg, dst_reg, 0, 0, 31);
		break;
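		/*
		 * BPF_ALU (32-bit) ops must leave the upper 32 bits of the
		 * destination zeroed. rlwinm with a zero shift and a 0-31
		 * mask selects exactly the low word, clearing the top half
		 * in a single instruction.
		 */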
		/*
		 * BPF_FROM_BE/LE
		 */
		case BPF_ALU | BPF_END | BPF_FROM_LE:
		case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef __BIG_ENDIAN__
			if (BPF_SRC(code) == BPF_FROM_BE)
				goto emit_clear;
#else /* !__BIG_ENDIAN__ */
			if (BPF_SRC(code) == BPF_FROM_LE)
				goto emit_clear;
#endif
			switch (imm) {
			case 16:
				/* Rotate 8 bits left & mask with 0x0000ff00 */
				PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23);
				/* Rotate 8 bits right & insert LSB to reg */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31);
				/* Move result back to dst_reg */
				PPC_MR(dst_reg, b2p[TMP_REG_1]);
				break;
			case 32:
				/*
				 * Rotate word left by 8 bits:
				 * 2 bytes are already in their final position
				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
				 */
				PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31);
				/* Rotate 24 bits and insert byte 1 */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7);
				/* Rotate 24 bits and insert byte 3 */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23);
				PPC_MR(dst_reg, b2p[TMP_REG_1]);
				break;
			case 64:
				/*
				 * Way easier and faster(?) to store the value
				 * into stack and then use ldbrx
				 *
				 * ctx->seen will be reliable in pass2, but
				 * the instructions generated will remain the
				 * same across all passes
				 */
				PPC_STD(dst_reg, 1, bpf_jit_stack_local(ctx));
				PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
				PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
				break;
			}
			break;

emit_clear:
			switch (imm) {
			case 16:
				/* zero-extend 16 bits into 64 bits */
				PPC_RLDICL(dst_reg, dst_reg, 0, 48);
				break;
			case 32:
				/* zero-extend 32 bits into 64 bits */
				PPC_RLDICL(dst_reg, dst_reg, 0, 32);
				break;
			case 64:
				/* nop */
				break;
			}
			break;
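		/*
		 * A "byteswap" to the native byte order is just a
		 * zero-extension: rldicl with sh=0 clears everything above
		 * the mask start, so mb=48 keeps the low 16 bits and mb=32
		 * the low 32 bits.
		 */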
		/*
		 * BPF_ST(X)
		 */
		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STB(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STH(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STW(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STD(src_reg, dst_reg, off);
			break;
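		/*
		 * Note: PowerPC has no store-immediate instructions, so
		 * BPF_ST shares the BPF_STX path by first materializing the
		 * immediate into a temporary and redirecting src_reg at it.
		 */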
		/*
		 * BPF_STX XADD (atomic_add)
		 */
		/* *(u32 *)(dst + off) += src */
		case BPF_STX | BPF_XADD | BPF_W:
			/* Get EA into TMP_REG_1 */
			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
			/* error if EA is not word-aligned */
			PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03);
			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12);
			PPC_LI(b2p[BPF_REG_0], 0);
			PPC_JMP(exit_addr);
			/* load value from memory into TMP_REG_2 */
			PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			/* add value from src_reg into this */
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			/* store result back */
			PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			/* we're done if this succeeded */
			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
			/* otherwise, let's try once more */
			PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			/* exit if the store was not successful */
			PPC_LI(b2p[BPF_REG_0], 0);
			PPC_BCC(COND_NE, exit_addr);
			break;
		/* *(u64 *)(dst + off) += src */
		case BPF_STX | BPF_XADD | BPF_DW:
			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
			/* error if EA is not doubleword-aligned */
			PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07);
			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4));
			PPC_LI(b2p[BPF_REG_0], 0);
			PPC_JMP(exit_addr);
			PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
			PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			PPC_LI(b2p[BPF_REG_0], 0);
			PPC_BCC(COND_NE, exit_addr);
			break;
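		/*
		 * lwarx/ldarx and stwcx./stdcx. are load-reserve and
		 * store-conditional pairs: the store succeeds (CR0.EQ set)
		 * only if the reservation taken by the load is still intact.
		 * Rather than emitting a retry loop, the JIT unrolls exactly
		 * one retry and otherwise bails out through the epilogue
		 * with a 0 return value.
		 */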
		/*
		 * BPF_LDX
		 */
		/* dst = *(u8 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			PPC_LBZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u16 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_H:
			PPC_LHZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u32 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_W:
			PPC_LWZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u64 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_DW:
			PPC_LD(dst_reg, src_reg, off);
			break;
		/*
		 * Doubleword load
		 * 16 byte instruction that uses two 'struct bpf_insn'
		 */
		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
			imm64 = ((u64)(u32) insn[i].imm) |
				(((u64)(u32) insn[i+1].imm) << 32);
			/* Adjust for two bpf instructions */
			addrs[++i] = ctx->idx * 4;
			PPC_LI64(dst_reg, imm64);
			break;
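		/*
		 * Branch offsets are expressed in units of BPF instructions,
		 * so addrs[] must also get an entry for the second half of
		 * the 16-byte immediate -- hence the addrs[++i] above before
		 * skipping over it.
		 */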
		/*
		 * Return/Exit
		 */
		case BPF_JMP | BPF_EXIT:
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue. If we _are_ the last instruction,
			 * we'll just fall through to the epilogue.
			 */
			if (i != flen - 1)
				PPC_JMP(exit_addr);
			/* else fall through to the epilogue */
			break;
		/*
		 * Call kernel helper
		 */
		case BPF_JMP | BPF_CALL:
			ctx->seen |= SEEN_FUNC;
			func = (u8 *) __bpf_call_base + imm;

			/* Save skb pointer if we need to re-cache skb data */
			if (bpf_helper_changes_skb_data(func))
				PPC_BPF_STL(3, 1, bpf_jit_stack_local(ctx));

			bpf_jit_emit_func_call(image, ctx, (u64)func);

			/* move return value from r3 to BPF_REG_0 */
			PPC_MR(b2p[BPF_REG_0], 3);

			/* refresh skb cache */
			if (bpf_helper_changes_skb_data(func)) {
				/* reload skb pointer to r3 */
				PPC_BPF_LL(3, 1, bpf_jit_stack_local(ctx));
				bpf_jit_emit_skb_loads(image, ctx);
			}
			break;
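		/*
		 * Note: the verifier rewrites a helper's function id into an
		 * offset relative to __bpf_call_base, so adding imm back to
		 * that base recovers the helper's kernel address.
		 */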
		/*
		 * Jumps and branches
		 */
		case BPF_JMP | BPF_JA:
			PPC_JMP(addrs[i + 1 + off]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_X:
			true_cond = COND_NE;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* Fall through */
cond_branch:
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
			case BPF_JMP | BPF_JNE | BPF_X:
				/* unsigned comparison */
				PPC_CMPLD(dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JSGT | BPF_X:
			case BPF_JMP | BPF_JSGE | BPF_X:
				/* signed comparison */
				PPC_CMPD(dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				PPC_AND_DOT(b2p[TMP_REG_1], dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JNE | BPF_K:
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
				/*
				 * Need sign-extended load, so only positive
				 * values can be used as imm in cmpldi
				 */
				if (imm >= 0 && imm < 32768)
					PPC_CMPLDI(dst_reg, imm);
				else {
					/* sign-extending load */
					PPC_LI32(b2p[TMP_REG_1], imm);
					/* ... but unsigned comparison */
					PPC_CMPLD(dst_reg, b2p[TMP_REG_1]);
				}
				break;
			case BPF_JMP | BPF_JSGT | BPF_K:
			case BPF_JMP | BPF_JSGE | BPF_K:
				/*
				 * signed comparison, so any 16-bit value
				 * can be used in cmpdi
				 */
				if (imm >= -32768 && imm < 32768)
					PPC_CMPDI(dst_reg, imm);
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_CMPD(dst_reg, b2p[TMP_REG_1]);
				}
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768)
					/* PPC_ANDI is _only/always_ dot-form */
					PPC_ANDI(b2p[TMP_REG_1], dst_reg, imm);
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_AND_DOT(b2p[TMP_REG_1], dst_reg,
						    b2p[TMP_REG_1]);
				}
				break;
			}
			PPC_BCC(true_cond, addrs[i + 1 + off]);
			break;
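		/*
		 * Conditional branches are emitted in two steps: the outer
		 * switch picks the CR bit to branch on (true_cond), the
		 * inner switch emits the right flavor of compare (signed,
		 * unsigned, or a dot-form 'and.' for JSET, which sets CR0 as
		 * a side effect), and a single bcc finishes the job.
		 */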
		/*
		 * Loads from packet header/data
		 * Assume 32-bit input value in imm and X (src_reg)
		 */

		/* Absolute loads */
		case BPF_LD | BPF_W | BPF_ABS:
			func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_word);
			goto common_load_abs;
		case BPF_LD | BPF_H | BPF_ABS:
			func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_half);
			goto common_load_abs;
		case BPF_LD | BPF_B | BPF_ABS:
			func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_byte);
common_load_abs:
			/*
			 * Load from [imm]
			 * Load into r4, which can just be passed onto
			 *  skb load helpers as the second parameter
			 */
			PPC_LI32(4, imm);
			goto common_load;

		/* Indirect loads */
		case BPF_LD | BPF_W | BPF_IND:
			func = (u8 *)sk_load_word;
			goto common_load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			func = (u8 *)sk_load_half;
			goto common_load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			func = (u8 *)sk_load_byte;
common_load_ind:
			/*
			 * Load from [src_reg + imm]
			 * Treat src_reg as a 32-bit value
			 */
			PPC_EXTSW(4, src_reg);
			if (imm) {
				if (imm >= -32768 && imm < 32768)
					PPC_ADDI(4, 4, IMM_L(imm));
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_ADD(4, 4, b2p[TMP_REG_1]);
				}
			}

common_load:
			ctx->seen |= SEEN_SKB;
			ctx->seen |= SEEN_FUNC;
			bpf_jit_emit_func_call(image, ctx, (u64)func);

			/*
			 * Helper returns 'lt' condition on error, and an
			 * appropriate return value in BPF_REG_0
			 */
			PPC_BCC(COND_LT, exit_addr);
			break;
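		/*
		 * Note: the sk_load_* routines (assembly, bpf_jit_asm64.S)
		 * use a private calling convention: skb in r3, offset in r4,
		 * with the loaded value coming back in BPF_REG_0 and errors
		 * signalled through the CR "lt" bit. For absolute loads,
		 * CHOOSE_LOAD_FUNC picks a variant that also copes with the
		 * negative (extension) offsets of classic BPF.
		 */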
		/*
		 * TODO: Tail call
		 */
		case BPF_JMP | BPF_CALL | BPF_X:

		default:
			/*
			 * The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
					code, i);
			return -ENOTSUPP;
		}
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}
void bpf_jit_compile(struct bpf_prog *fp) { }
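/*
 * Classic BPF programs are converted to eBPF by the core and funneled
 * through bpf_int_jit_compile(), so the classic JIT entry point above is
 * intentionally left empty.
 */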
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
{
	u32 proglen;
	u32 alloclen;
	u8 *image = NULL;
	u32 *code_base;
	u32 *addrs;
	struct codegen_context cgctx;
	int pass;
	int flen;
	struct bpf_binary_header *bpf_hdr;

	if (!bpf_jit_enable)
		return fp;

	flen = fp->len;
	addrs = kzalloc((flen + 1) * sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return fp;

	cgctx.idx = 0;
	cgctx.seen = 0;
	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
		/* We hit something illegal or unsupported. */
		goto out;

	/*
	 * Pretend to build prologue, given the features we've seen. This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE;

	bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4,
			bpf_jit_fill_ill_insns);
	if (!bpf_hdr)
		goto out;

	code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(code_base, &cgctx);
		bpf_jit_build_body(fp, code_base, &cgctx, addrs);
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}
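	/*
	 * Two real passes: branch targets read from addrs[] only settle once
	 * a full pass has laid out the body, so the image is emitted again
	 * with the final offsets. The instruction count is expected to stay
	 * identical across passes, so the "shrink" figure above should read
	 * 0 by the last pass.
	 */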
	if (bpf_jit_enable > 1)
		/*
		 * Note that we dump from code_base rather than image, since
		 * the opcodes are in code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);
	bpf_flush_icache(bpf_hdr, image + alloclen);

#ifdef PPC64_ELF_ABI_v1
	/* Function descriptor nastiness: Address + TOC */
	((u64 *)image)[0] = (u64)code_base;
	((u64 *)image)[1] = local_paca->kernel_toc;
#endif

	fp->bpf_func = (void *)image;
	fp->jited = 1;

out:
	kfree(addrs);
	return fp;
}
void bpf_jit_free(struct bpf_prog *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *bpf_hdr = (void *)addr;

	if (fp->jited)
		bpf_jit_binary_free(bpf_hdr);

	bpf_prog_unlock_free(fp);
}