1 { 2 "subtraction bounds (map value) variant 1", 3 .insns = { 4 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 5 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 6 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 7 BPF_LD_MAP_FD(BPF_REG_1, 0), 8 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 9 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), 10 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 11 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7), 12 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1), 13 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5), 14 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3), 15 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56), 16 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 17 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 18 BPF_EXIT_INSN(), 19 BPF_MOV64_IMM(BPF_REG_0, 0), 20 BPF_EXIT_INSN(), 21 }, 22 .fixup_map_hash_8b = { 3 }, 23 .errstr = "R0 max value is outside of the allowed memory range", 24 .result = REJECT, 25 }, 26 { 27 "subtraction bounds (map value) variant 2", 28 .insns = { 29 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 30 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 31 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 32 BPF_LD_MAP_FD(BPF_REG_1, 0), 33 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 34 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), 35 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 36 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6), 37 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1), 38 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4), 39 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3), 40 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 41 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 42 BPF_EXIT_INSN(), 43 BPF_MOV64_IMM(BPF_REG_0, 0), 44 BPF_EXIT_INSN(), 45 }, 46 .fixup_map_hash_8b = { 3 }, 47 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", 48 .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", 49 .result = REJECT, 50 }, 51 { 52 "check subtraction on pointers for unpriv", 53 .insns = { 54 BPF_MOV64_IMM(BPF_REG_0, 0), 55 BPF_LD_MAP_FD(BPF_REG_ARG1, 0), 56 BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), 57 BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8), 58 BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 9), 59 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 60 BPF_MOV64_REG(BPF_REG_9, BPF_REG_FP), 61 BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_0), 62 BPF_LD_MAP_FD(BPF_REG_ARG1, 0), 63 BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), 64 BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8), 65 BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 0), 66 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 67 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 68 BPF_EXIT_INSN(), 69 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0), 70 BPF_MOV64_IMM(BPF_REG_0, 0), 71 BPF_EXIT_INSN(), 72 }, 73 .fixup_map_hash_8b = { 1, 9 }, 74 .result = ACCEPT, 75 .result_unpriv = REJECT, 76 .errstr_unpriv = "R9 pointer -= pointer prohibited", 77 }, 78 { 79 "bounds check based on zero-extended MOV", 80 .insns = { 81 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 82 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 83 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 84 BPF_LD_MAP_FD(BPF_REG_1, 0), 85 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 86 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 87 /* r2 = 0x0000'0000'ffff'ffff */ 88 BPF_MOV32_IMM(BPF_REG_2, 0xffffffff), 89 /* r2 = 0 */ 90 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32), 91 /* no-op */ 92 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), 93 /* access at offset 0 */ 94 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 95 /* exit */ 96 BPF_MOV64_IMM(BPF_REG_0, 0), 97 BPF_EXIT_INSN(), 98 }, 99 .fixup_map_hash_8b = { 3 }, 100 .result = ACCEPT 101 }, 102 { 103 "bounds check based on sign-extended MOV. test1", 104 .insns = { 105 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 106 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 107 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 108 BPF_LD_MAP_FD(BPF_REG_1, 0), 109 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 110 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 111 /* r2 = 0xffff'ffff'ffff'ffff */ 112 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff), 113 /* r2 = 0xffff'ffff */ 114 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32), 115 /* r0 = <oob pointer> */ 116 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), 117 /* access to OOB pointer */ 118 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 119 /* exit */ 120 BPF_MOV64_IMM(BPF_REG_0, 0), 121 BPF_EXIT_INSN(), 122 }, 123 .fixup_map_hash_8b = { 3 }, 124 .errstr = "map_value pointer and 4294967295", 125 .result = REJECT 126 }, 127 { 128 "bounds check based on sign-extended MOV. test2", 129 .insns = { 130 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 131 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 132 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 133 BPF_LD_MAP_FD(BPF_REG_1, 0), 134 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 135 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 136 /* r2 = 0xffff'ffff'ffff'ffff */ 137 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff), 138 /* r2 = 0xfff'ffff */ 139 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36), 140 /* r0 = <oob pointer> */ 141 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), 142 /* access to OOB pointer */ 143 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 144 /* exit */ 145 BPF_MOV64_IMM(BPF_REG_0, 0), 146 BPF_EXIT_INSN(), 147 }, 148 .fixup_map_hash_8b = { 3 }, 149 .errstr = "R0 min value is outside of the allowed memory range", 150 .result = REJECT 151 }, 152 { 153 "bounds check based on reg_off + var_off + insn_off. test1", 154 .insns = { 155 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 156 offsetof(struct __sk_buff, mark)), 157 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 158 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 159 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 160 BPF_LD_MAP_FD(BPF_REG_1, 0), 161 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 162 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 163 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1), 164 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1), 165 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6), 166 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1), 167 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3), 168 BPF_MOV64_IMM(BPF_REG_0, 0), 169 BPF_EXIT_INSN(), 170 }, 171 .fixup_map_hash_8b = { 4 }, 172 .errstr = "value_size=8 off=1073741825", 173 .result = REJECT, 174 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 175 }, 176 { 177 "bounds check based on reg_off + var_off + insn_off. test2", 178 .insns = { 179 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 180 offsetof(struct __sk_buff, mark)), 181 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 182 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 183 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 184 BPF_LD_MAP_FD(BPF_REG_1, 0), 185 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 186 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 187 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1), 188 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1), 189 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6), 190 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1), 191 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3), 192 BPF_MOV64_IMM(BPF_REG_0, 0), 193 BPF_EXIT_INSN(), 194 }, 195 .fixup_map_hash_8b = { 4 }, 196 .errstr = "value 1073741823", 197 .result = REJECT, 198 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 199 }, 200 { 201 "bounds check after truncation of non-boundary-crossing range", 202 .insns = { 203 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 204 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 205 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 206 BPF_LD_MAP_FD(BPF_REG_1, 0), 207 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 208 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), 209 /* r1 = [0x00, 0xff] */ 210 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 211 BPF_MOV64_IMM(BPF_REG_2, 1), 212 /* r2 = 0x10'0000'0000 */ 213 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36), 214 /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */ 215 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), 216 /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */ 217 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff), 218 /* r1 = [0x00, 0xff] */ 219 BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff), 220 /* r1 = 0 */ 221 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), 222 /* no-op */ 223 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 224 /* access at offset 0 */ 225 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 226 /* exit */ 227 BPF_MOV64_IMM(BPF_REG_0, 0), 228 BPF_EXIT_INSN(), 229 }, 230 .fixup_map_hash_8b = { 3 }, 231 .result = ACCEPT 232 }, 233 { 234 "bounds check after truncation of boundary-crossing range (1)", 235 .insns = { 236 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 237 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 238 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 239 BPF_LD_MAP_FD(BPF_REG_1, 0), 240 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 241 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), 242 /* r1 = [0x00, 0xff] */ 243 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 244 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), 245 /* r1 = [0xffff'ff80, 0x1'0000'007f] */ 246 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), 247 /* r1 = [0xffff'ff80, 0xffff'ffff] or 248 * [0x0000'0000, 0x0000'007f] 249 */ 250 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0), 251 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), 252 /* r1 = [0x00, 0xff] or 253 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff] 254 */ 255 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), 256 /* error on OOB pointer computation */ 257 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 258 /* exit */ 259 BPF_MOV64_IMM(BPF_REG_0, 0), 260 BPF_EXIT_INSN(), 261 }, 262 .fixup_map_hash_8b = { 3 }, 263 /* not actually fully unbounded, but the bound is very high */ 264 .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root", 265 .result_unpriv = REJECT, 266 .errstr = "value -4294967168 makes map_value pointer be out of bounds", 267 .result = REJECT, 268 }, 269 { 270 "bounds check after truncation of boundary-crossing range (2)", 271 .insns = { 272 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 273 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 274 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 275 BPF_LD_MAP_FD(BPF_REG_1, 0), 276 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 277 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), 278 /* r1 = [0x00, 0xff] */ 279 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 280 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), 281 /* r1 = [0xffff'ff80, 0x1'0000'007f] */ 282 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), 283 /* r1 = [0xffff'ff80, 0xffff'ffff] or 284 * [0x0000'0000, 0x0000'007f] 285 * difference to previous test: truncation via MOV32 286 * instead of ALU32. 287 */ 288 BPF_MOV32_REG(BPF_REG_1, BPF_REG_1), 289 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), 290 /* r1 = [0x00, 0xff] or 291 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff] 292 */ 293 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), 294 /* error on OOB pointer computation */ 295 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 296 /* exit */ 297 BPF_MOV64_IMM(BPF_REG_0, 0), 298 BPF_EXIT_INSN(), 299 }, 300 .fixup_map_hash_8b = { 3 }, 301 /* not actually fully unbounded, but the bound is very high */ 302 .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root", 303 .result_unpriv = REJECT, 304 .errstr = "value -4294967168 makes map_value pointer be out of bounds", 305 .result = REJECT, 306 }, 307 { 308 "bounds check after wrapping 32-bit addition", 309 .insns = { 310 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 311 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 312 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 313 BPF_LD_MAP_FD(BPF_REG_1, 0), 314 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 315 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), 316 /* r1 = 0x7fff'ffff */ 317 BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff), 318 /* r1 = 0xffff'fffe */ 319 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff), 320 /* r1 = 0 */ 321 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2), 322 /* no-op */ 323 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 324 /* access at offset 0 */ 325 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 326 /* exit */ 327 BPF_MOV64_IMM(BPF_REG_0, 0), 328 BPF_EXIT_INSN(), 329 }, 330 .fixup_map_hash_8b = { 3 }, 331 .result = ACCEPT 332 }, 333 { 334 "bounds check after shift with oversized count operand", 335 .insns = { 336 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 337 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 338 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 339 BPF_LD_MAP_FD(BPF_REG_1, 0), 340 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 341 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 342 BPF_MOV64_IMM(BPF_REG_2, 32), 343 BPF_MOV64_IMM(BPF_REG_1, 1), 344 /* r1 = (u32)1 << (u32)32 = ? */ 345 BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2), 346 /* r1 = [0x0000, 0xffff] */ 347 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff), 348 /* computes unknown pointer, potentially OOB */ 349 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 350 /* potentially OOB access */ 351 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 352 /* exit */ 353 BPF_MOV64_IMM(BPF_REG_0, 0), 354 BPF_EXIT_INSN(), 355 }, 356 .fixup_map_hash_8b = { 3 }, 357 .errstr = "R0 max value is outside of the allowed memory range", 358 .result = REJECT 359 }, 360 { 361 "bounds check after right shift of maybe-negative number", 362 .insns = { 363 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 364 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 365 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 366 BPF_LD_MAP_FD(BPF_REG_1, 0), 367 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 368 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 369 /* r1 = [0x00, 0xff] */ 370 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 371 /* r1 = [-0x01, 0xfe] */ 372 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1), 373 /* r1 = 0 or 0xff'ffff'ffff'ffff */ 374 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), 375 /* r1 = 0 or 0xffff'ffff'ffff */ 376 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), 377 /* computes unknown pointer, potentially OOB */ 378 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 379 /* potentially OOB access */ 380 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), 381 /* exit */ 382 BPF_MOV64_IMM(BPF_REG_0, 0), 383 BPF_EXIT_INSN(), 384 }, 385 .fixup_map_hash_8b = { 3 }, 386 .errstr = "R0 unbounded memory access", 387 .result = REJECT 388 }, 389 { 390 "bounds check after 32-bit right shift with 64-bit input", 391 .insns = { 392 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 393 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 394 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 395 BPF_LD_MAP_FD(BPF_REG_1, 0), 396 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 397 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 398 /* r1 = 2 */ 399 BPF_MOV64_IMM(BPF_REG_1, 2), 400 /* r1 = 1<<32 */ 401 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 31), 402 /* r1 = 0 (NOT 2!) */ 403 BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 31), 404 /* r1 = 0xffff'fffe (NOT 0!) */ 405 BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 2), 406 /* error on computing OOB pointer */ 407 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 408 /* exit */ 409 BPF_MOV64_IMM(BPF_REG_0, 0), 410 BPF_EXIT_INSN(), 411 }, 412 .fixup_map_hash_8b = { 3 }, 413 .errstr = "math between map_value pointer and 4294967294 is not allowed", 414 .result = REJECT, 415 }, 416 { 417 "bounds check map access with off+size signed 32bit overflow. test1", 418 .insns = { 419 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 420 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 421 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 422 BPF_LD_MAP_FD(BPF_REG_1, 0), 423 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 424 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 425 BPF_EXIT_INSN(), 426 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe), 427 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), 428 BPF_JMP_A(0), 429 BPF_EXIT_INSN(), 430 }, 431 .fixup_map_hash_8b = { 3 }, 432 .errstr = "map_value pointer and 2147483646", 433 .result = REJECT 434 }, 435 { 436 "bounds check map access with off+size signed 32bit overflow. test2", 437 .insns = { 438 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 439 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 440 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 441 BPF_LD_MAP_FD(BPF_REG_1, 0), 442 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 443 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 444 BPF_EXIT_INSN(), 445 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff), 446 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff), 447 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff), 448 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), 449 BPF_JMP_A(0), 450 BPF_EXIT_INSN(), 451 }, 452 .fixup_map_hash_8b = { 3 }, 453 .errstr = "pointer offset 1073741822", 454 .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", 455 .result = REJECT 456 }, 457 { 458 "bounds check map access with off+size signed 32bit overflow. test3", 459 .insns = { 460 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 461 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 462 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 463 BPF_LD_MAP_FD(BPF_REG_1, 0), 464 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 465 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 466 BPF_EXIT_INSN(), 467 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff), 468 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff), 469 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2), 470 BPF_JMP_A(0), 471 BPF_EXIT_INSN(), 472 }, 473 .fixup_map_hash_8b = { 3 }, 474 .errstr = "pointer offset -1073741822", 475 .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", 476 .result = REJECT 477 }, 478 { 479 "bounds check map access with off+size signed 32bit overflow. test4", 480 .insns = { 481 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 482 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 483 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 484 BPF_LD_MAP_FD(BPF_REG_1, 0), 485 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 486 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 487 BPF_EXIT_INSN(), 488 BPF_MOV64_IMM(BPF_REG_1, 1000000), 489 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000), 490 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 491 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2), 492 BPF_JMP_A(0), 493 BPF_EXIT_INSN(), 494 }, 495 .fixup_map_hash_8b = { 3 }, 496 .errstr = "map_value pointer and 1000000000000", 497 .result = REJECT 498 }, 499 { 500 "bounds check mixed 32bit and 64bit arithmetic. test1", 501 .insns = { 502 BPF_MOV64_IMM(BPF_REG_0, 0), 503 BPF_MOV64_IMM(BPF_REG_1, -1), 504 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32), 505 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), 506 /* r1 = 0xffffFFFF00000001 */ 507 BPF_JMP32_IMM(BPF_JGT, BPF_REG_1, 1, 3), 508 /* check ALU64 op keeps 32bit bounds */ 509 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), 510 BPF_JMP32_IMM(BPF_JGT, BPF_REG_1, 2, 1), 511 BPF_JMP_A(1), 512 /* invalid ldx if bounds are lost above */ 513 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1), 514 BPF_EXIT_INSN(), 515 }, 516 .result = ACCEPT 517 }, 518 { 519 "bounds check mixed 32bit and 64bit arithmetic. test2", 520 .insns = { 521 BPF_MOV64_IMM(BPF_REG_0, 0), 522 BPF_MOV64_IMM(BPF_REG_1, -1), 523 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32), 524 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), 525 /* r1 = 0xffffFFFF00000001 */ 526 BPF_MOV64_IMM(BPF_REG_2, 3), 527 /* r1 = 0x2 */ 528 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1), 529 /* check ALU32 op zero extends 64bit bounds */ 530 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 1), 531 BPF_JMP_A(1), 532 /* invalid ldx if bounds are lost above */ 533 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1), 534 BPF_EXIT_INSN(), 535 }, 536 .result = ACCEPT 537 }, 538 { 539 "assigning 32bit bounds to 64bit for wA = 0, wB = wA", 540 .insns = { 541 BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1, 542 offsetof(struct __sk_buff, data_end)), 543 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 544 offsetof(struct __sk_buff, data)), 545 BPF_MOV32_IMM(BPF_REG_9, 0), 546 BPF_MOV32_REG(BPF_REG_2, BPF_REG_9), 547 BPF_MOV64_REG(BPF_REG_6, BPF_REG_7), 548 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_2), 549 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 550 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8), 551 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_8, 1), 552 BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_6, 0), 553 BPF_MOV64_IMM(BPF_REG_0, 0), 554 BPF_EXIT_INSN(), 555 }, 556 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 557 .result = ACCEPT, 558 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 559 }, 560 { 561 "bounds check for reg = 0, reg xor 1", 562 .insns = { 563 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 564 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 565 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 566 BPF_LD_MAP_FD(BPF_REG_1, 0), 567 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 568 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 569 BPF_EXIT_INSN(), 570 BPF_MOV64_IMM(BPF_REG_1, 0), 571 BPF_ALU64_IMM(BPF_XOR, BPF_REG_1, 1), 572 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), 573 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8), 574 BPF_MOV64_IMM(BPF_REG_0, 0), 575 BPF_EXIT_INSN(), 576 }, 577 .fixup_map_hash_8b = { 3 }, 578 .result = ACCEPT, 579 }, 580 { 581 "bounds check for reg32 = 0, reg32 xor 1", 582 .insns = { 583 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 584 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 585 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 586 BPF_LD_MAP_FD(BPF_REG_1, 0), 587 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 588 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 589 BPF_EXIT_INSN(), 590 BPF_MOV32_IMM(BPF_REG_1, 0), 591 BPF_ALU32_IMM(BPF_XOR, BPF_REG_1, 1), 592 BPF_JMP32_IMM(BPF_JNE, BPF_REG_1, 0, 1), 593 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8), 594 BPF_MOV64_IMM(BPF_REG_0, 0), 595 BPF_EXIT_INSN(), 596 }, 597 .fixup_map_hash_8b = { 3 }, 598 .result = ACCEPT, 599 }, 600 { 601 "bounds check for reg = 2, reg xor 3", 602 .insns = { 603 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 604 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 605 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 606 BPF_LD_MAP_FD(BPF_REG_1, 0), 607 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 608 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 609 BPF_EXIT_INSN(), 610 BPF_MOV64_IMM(BPF_REG_1, 2), 611 BPF_ALU64_IMM(BPF_XOR, BPF_REG_1, 3), 612 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0, 1), 613 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8), 614 BPF_MOV64_IMM(BPF_REG_0, 0), 615 BPF_EXIT_INSN(), 616 }, 617 .fixup_map_hash_8b = { 3 }, 618 .result = ACCEPT, 619 }, 620 { 621 "bounds check for reg = any, reg xor 3", 622 .insns = { 623 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 624 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 625 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 626 BPF_LD_MAP_FD(BPF_REG_1, 0), 627 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 628 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 629 BPF_EXIT_INSN(), 630 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), 631 BPF_ALU64_IMM(BPF_XOR, BPF_REG_1, 3), 632 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), 633 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8), 634 BPF_MOV64_IMM(BPF_REG_0, 0), 635 BPF_EXIT_INSN(), 636 }, 637 .fixup_map_hash_8b = { 3 }, 638 .result = REJECT, 639 .errstr = "invalid access to map value", 640 .errstr_unpriv = "invalid access to map value", 641 }, 642 { 643 "bounds check for reg32 = any, reg32 xor 3", 644 .insns = { 645 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 646 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 647 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 648 BPF_LD_MAP_FD(BPF_REG_1, 0), 649 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 650 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 651 BPF_EXIT_INSN(), 652 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), 653 BPF_ALU32_IMM(BPF_XOR, BPF_REG_1, 3), 654 BPF_JMP32_IMM(BPF_JNE, BPF_REG_1, 0, 1), 655 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8), 656 BPF_MOV64_IMM(BPF_REG_0, 0), 657 BPF_EXIT_INSN(), 658 }, 659 .fixup_map_hash_8b = { 3 }, 660 .result = REJECT, 661 .errstr = "invalid access to map value", 662 .errstr_unpriv = "invalid access to map value", 663 }, 664 { 665 "bounds check for reg > 0, reg xor 3", 666 .insns = { 667 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 668 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 669 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 670 BPF_LD_MAP_FD(BPF_REG_1, 0), 671 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 672 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 673 BPF_EXIT_INSN(), 674 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), 675 BPF_JMP_IMM(BPF_JLE, BPF_REG_1, 0, 3), 676 BPF_ALU64_IMM(BPF_XOR, BPF_REG_1, 3), 677 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 1), 678 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8), 679 BPF_MOV64_IMM(BPF_REG_0, 0), 680 BPF_EXIT_INSN(), 681 }, 682 .fixup_map_hash_8b = { 3 }, 683 .result = ACCEPT, 684 }, 685 { 686 "bounds check for reg32 > 0, reg32 xor 3", 687 .insns = { 688 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 689 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 690 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 691 BPF_LD_MAP_FD(BPF_REG_1, 0), 692 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 693 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 694 BPF_EXIT_INSN(), 695 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), 696 BPF_JMP32_IMM(BPF_JLE, BPF_REG_1, 0, 3), 697 BPF_ALU32_IMM(BPF_XOR, BPF_REG_1, 3), 698 BPF_JMP32_IMM(BPF_JGE, BPF_REG_1, 0, 1), 699 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8), 700 BPF_MOV64_IMM(BPF_REG_0, 0), 701 BPF_EXIT_INSN(), 702 }, 703 .fixup_map_hash_8b = { 3 }, 704 .result = ACCEPT, 705 }, 706 { 707 "bounds checks after 32-bit truncation. test 1", 708 .insns = { 709 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 710 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 711 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 712 BPF_LD_MAP_FD(BPF_REG_1, 0), 713 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 714 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 715 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 716 /* This used to reduce the max bound to 0x7fffffff */ 717 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), 718 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0x7fffffff, 1), 719 BPF_MOV64_IMM(BPF_REG_0, 0), 720 BPF_EXIT_INSN(), 721 }, 722 .fixup_map_hash_8b = { 3 }, 723 .errstr_unpriv = "R0 leaks addr", 724 .result_unpriv = REJECT, 725 .result = ACCEPT, 726 }, 727 { 728 "bounds checks after 32-bit truncation. test 2", 729 .insns = { 730 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 731 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 732 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 733 BPF_LD_MAP_FD(BPF_REG_1, 0), 734 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 735 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 736 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 737 BPF_JMP_IMM(BPF_JSLT, BPF_REG_1, 1, 1), 738 BPF_JMP32_IMM(BPF_JSLT, BPF_REG_1, 0, 1), 739 BPF_MOV64_IMM(BPF_REG_0, 0), 740 BPF_EXIT_INSN(), 741 }, 742 .fixup_map_hash_8b = { 3 }, 743 .errstr_unpriv = "R0 leaks addr", 744 .result_unpriv = REJECT, 745 .result = ACCEPT, 746 }, 747