drm/i915: Reject commands that explicitly generate interrupts
[deliverable/linux.git] / drivers / gpu / drm / i915 / i915_cmd_parser.c
1 /*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Brad Volkin <bradley.d.volkin@intel.com>
25 *
26 */
27
28 #include "i915_drv.h"
29
30 /**
31 * DOC: i915 batch buffer command parser
32 *
33 * Motivation:
34 * Certain OpenGL features (e.g. transform feedback, performance monitoring)
35 * require userspace code to submit batches containing commands such as
36 * MI_LOAD_REGISTER_IMM to access various registers. Unfortunately, some
37 * generations of the hardware will noop these commands in "unsecure" batches
38 * (which includes all userspace batches submitted via i915) even though the
39 * commands may be safe and represent the intended programming model of the
40 * device.
41 *
42 * The software command parser is similar in operation to the command parsing
43 * done in hardware for unsecure batches. However, the software parser allows
44 * some operations that would be noop'd by hardware, if the parser determines
45 * the operation is safe, and submits the batch as "secure" to prevent hardware
46 * parsing.
47 *
48 * Threats:
49 * At a high level, the hardware (and software) checks attempt to prevent
50 * granting userspace undue privileges. There are three categories of privilege.
51 *
52 * First, commands which are explicitly defined as privileged or which should
53 * only be used by the kernel driver. The parser generally rejects such
54 * commands, though it may allow some from the drm master process.
55 *
56 * Second, commands which access registers. To support correct/enhanced
57 * userspace functionality, particularly certain OpenGL extensions, the parser
58 * provides a whitelist of registers which userspace may safely access (for both
59 * normal and drm master processes).
60 *
61 * Third, commands which access privileged memory (i.e. GGTT, HWS page, etc).
62 * The parser always rejects such commands.
63 *
64 * The majority of the problematic commands fall in the MI_* range, with only a
65 * few specific commands on each ring (e.g. PIPE_CONTROL and MI_FLUSH_DW).
66 *
67 * Implementation:
68 * Each ring maintains tables of commands and registers which the parser uses in
69 * scanning batch buffers submitted to that ring.
70 *
71 * Since the set of commands that the parser must check for is significantly
72 * smaller than the number of commands supported, the parser tables contain only
73 * those commands required by the parser. This generally works because command
74 * opcode ranges have standard command length encodings. So for commands that
75 * the parser does not need to check, it can easily skip them. This is
76 * implementated via a per-ring length decoding vfunc.
77 *
78 * Unfortunately, there are a number of commands that do not follow the standard
79 * length encoding for their opcode range, primarily amongst the MI_* commands.
80 * To handle this, the parser provides a way to define explicit "skip" entries
81 * in the per-ring command tables.
82 *
83 * Other command table entries map fairly directly to high level categories
84 * mentioned above: rejected, master-only, register whitelist. The parser
85 * implements a number of checks, including the privileged memory checks, via a
86 * general bitmasking mechanism.
87 */
88
/*
 * Opcode masks for the standard, fixed-length command encodings of each
 * instruction client.  A command header ANDed with the appropriate mask
 * yields the canonical opcode used when matching against the tables below.
 */
#define STD_MI_OPCODE_MASK  0xFF800000
#define STD_3D_OPCODE_MASK  0xFFFF0000
#define STD_2D_OPCODE_MASK  0xFFC00000
#define STD_MFX_OPCODE_MASK 0xFFFF0000

/*
 * Build one drm_i915_cmd_descriptor table entry.
 * @op:  command opcode value
 * @opm: opcode mask ANDed with the header before comparing against @op
 * @f:   true if the command has a fixed length (F below); !F means the
 *       length is encoded in the header
 * @lm:  fixed length when @f, otherwise the mask extracting the length field
 * @fl:  action flags (CMD_DESC_*)
 * Any remaining arguments initialize the union members (.reg / .bits).
 */
#define CMD(op, opm, f, lm, fl, ...)				\
	{							\
		.flags = (fl) | ((f) ? CMD_DESC_FIXED : 0),	\
		.cmd = { (op), (opm) },				\
		.length = { (lm) },				\
		__VA_ARGS__					\
	}

/* Convenience macros to compress the tables */
#define SMI STD_MI_OPCODE_MASK
#define S3D STD_3D_OPCODE_MASK
#define S2D STD_2D_OPCODE_MASK
#define SMFX STD_MFX_OPCODE_MASK
#define F true
#define S CMD_DESC_SKIP
#define R CMD_DESC_REJECT
#define W CMD_DESC_REGISTER
#define B CMD_DESC_BITMASK
#define M CMD_DESC_MASTER
113
/*
 * Commands common to all rings.
 *
 *            Command                          Mask   Fixed Len   Action
 *            ---------------------------------------------------------- */
static const struct drm_i915_cmd_descriptor common_cmds[] = {
	CMD(  MI_NOOP,                          SMI,    F,  1,      S  ),
	/* Explicitly generates an interrupt; reject from userspace */
	CMD(  MI_USER_INTERRUPT,                SMI,    F,  1,      R  ),
	CMD(  MI_WAIT_FOR_EVENT,                SMI,    F,  1,      M  ),
	CMD(  MI_ARB_CHECK,                     SMI,    F,  1,      S  ),
	CMD(  MI_REPORT_HEAD,                   SMI,    F,  1,      S  ),
	CMD(  MI_SUSPEND_FLUSH,                 SMI,    F,  1,      S  ),
	CMD(  MI_SEMAPHORE_MBOX,                SMI,   !F,  0xFF,   R  ),
	/* Writes the hardware status page (privileged memory); reject */
	CMD(  MI_STORE_DWORD_INDEX,             SMI,   !F,  0xFF,   R  ),
	/*
	 * Register accesses are allowed only when the target register is
	 * on the per-ring (or drm-master) whitelist; .reg describes where
	 * the register offset lives in the command and how to extract it.
	 */
	CMD(  MI_LOAD_REGISTER_IMM(1),          SMI,   !F,  0xFF,   W,
	      .reg = { .offset = 1, .mask = 0x007FFFFC }               ),
	CMD(  MI_STORE_REGISTER_MEM(1),         SMI,   !F,  0xFF,   W,
	      .reg = { .offset = 1, .mask = 0x007FFFFC }               ),
	CMD(  MI_LOAD_REGISTER_MEM,             SMI,   !F,  0xFF,   W,
	      .reg = { .offset = 1, .mask = 0x007FFFFC }               ),
	CMD(  MI_BATCH_BUFFER_START,            SMI,   !F,  0xFF,   S  ),
};
133
/* Render-ring (RCS) commands for all gen7 platforms. */
static const struct drm_i915_cmd_descriptor render_cmds[] = {
	CMD(  MI_FLUSH,                         SMI,    F,  1,      S  ),
	CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
	CMD(  MI_PREDICATE,                     SMI,    F,  1,      S  ),
	CMD(  MI_TOPOLOGY_FILTER,               SMI,    F,  1,      S  ),
	CMD(  MI_DISPLAY_FLIP,                  SMI,   !F,  0xFF,   R  ),
	/* Context management is the kernel driver's job; reject */
	CMD(  MI_SET_CONTEXT,                   SMI,   !F,  0xFF,   R  ),
	CMD(  MI_URB_CLEAR,                     SMI,   !F,  0xFF,   S  ),
	/* GGTT updates touch privileged memory; reject */
	CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0xFF,   R  ),
	CMD(  MI_CLFLUSH,                       SMI,   !F,  0x3FF,  S  ),
	CMD(  MI_CONDITIONAL_BATCH_BUFFER_END,  SMI,   !F,  0xFF,   S  ),
	CMD(  GFX_OP_3DSTATE_VF_STATISTICS,     S3D,    F,  1,      S  ),
	CMD(  PIPELINE_SELECT,                  S3D,    F,  1,      S  ),
	/* Allowed only if it does not enable scoreboard MMIO access */
	CMD(  MEDIA_VFE_STATE,			S3D,   !F,  0xFFFF, B,
	      .bits = {{
			.offset = 2,
			.mask = MEDIA_VFE_STATE_MMIO_ACCESS_MASK,
			.expected = 0,
	      }},						       ),
	CMD(  GPGPU_OBJECT,                     S3D,   !F,  0xFF,   S  ),
	CMD(  GPGPU_WALKER,                     S3D,   !F,  0xFF,   S  ),
	CMD(  GFX_OP_3DSTATE_SO_DECL_LIST,      S3D,   !F,  0x1FF,  S  ),
	/* Allowed only without MMIO writes and without the notify interrupt */
	CMD(  GFX_OP_PIPE_CONTROL(5),           S3D,   !F,  0xFF,   B,
	      .bits = {{
			.offset = 1,
			.mask = (PIPE_CONTROL_MMIO_WRITE | PIPE_CONTROL_NOTIFY),
			.expected = 0,
	      }},						       ),
};
163
/* Additional render-ring commands present only on Haswell. */
static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
	CMD(  MI_SET_PREDICATE,                 SMI,    F,  1,      S  ),
	CMD(  MI_RS_CONTROL,                    SMI,    F,  1,      S  ),
	CMD(  MI_URB_ATOMIC_ALLOC,              SMI,    F,  1,      S  ),
	CMD(  MI_RS_CONTEXT,                    SMI,    F,  1,      S  ),
	/* Scan-line loads are permitted only for the drm master */
	CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   M  ),
	CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   R  ),
	/* Register-to-register moves bypass the whitelist check; reject */
	CMD(  MI_LOAD_REGISTER_REG,             SMI,   !F,  0xFF,   R  ),
	CMD(  MI_RS_STORE_DATA_IMM,             SMI,   !F,  0xFF,   S  ),
	CMD(  MI_LOAD_URB_MEM,                  SMI,   !F,  0xFF,   S  ),
	CMD(  MI_STORE_URB_MEM,                 SMI,   !F,  0xFF,   S  ),
	CMD(  GFX_OP_3DSTATE_DX9_CONSTANTF_VS,  S3D,   !F,  0x7FF,  S  ),
	CMD(  GFX_OP_3DSTATE_DX9_CONSTANTF_PS,  S3D,   !F,  0x7FF,  S  ),

	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS,  S3D,   !F,  0x1FF,  S  ),
	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS,  S3D,   !F,  0x1FF,  S  ),
	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS,  S3D,   !F,  0x1FF,  S  ),
	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS,  S3D,   !F,  0x1FF,  S  ),
	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS,  S3D,   !F,  0x1FF,  S  ),
};
184
/* Video/BSD ring (VCS) commands. */
static const struct drm_i915_cmd_descriptor video_cmds[] = {
	CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
	CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0xFF,   S  ),
	CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3F,   R  ),
	/* Allowed only if the flush does not request a notify interrupt */
	CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_FLUSH_DW_NOTIFY,
			.expected = 0,
	      }},						       ),
	CMD(  MI_CONDITIONAL_BATCH_BUFFER_END,  SMI,   !F,  0xFF,   S  ),
	/*
	 * MFX_WAIT doesn't fit the way we handle length for most commands.
	 * It has a length field but it uses a non-standard length bias.
	 * It is always 1 dword though, so just treat it as fixed length.
	 */
	CMD(  MFX_WAIT,                         SMFX,   F,  1,      S  ),
};
203
/* Video-enhancement ring (VECS) commands; Haswell only has this ring. */
static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
	CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
	CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0xFF,   S  ),
	CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3F,   R  ),
	/* Allowed only if the flush does not request a notify interrupt */
	CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_FLUSH_DW_NOTIFY,
			.expected = 0,
	      }},						       ),
	CMD(  MI_CONDITIONAL_BATCH_BUFFER_END,  SMI,   !F,  0xFF,   S  ),
};
216
/* Blitter ring (BCS) commands. */
static const struct drm_i915_cmd_descriptor blt_cmds[] = {
	CMD(  MI_DISPLAY_FLIP,                  SMI,   !F,  0xFF,   R  ),
	CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0x3FF,  S  ),
	CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3F,   R  ),
	/* Allowed only if the flush does not request a notify interrupt */
	CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_FLUSH_DW_NOTIFY,
			.expected = 0,
	      }},						       ),
	CMD(  COLOR_BLT,                        S2D,   !F,  0x3F,   S  ),
	CMD(  SRC_COPY_BLT,                     S2D,   !F,  0x3F,   S  ),
};
230
/* Additional blitter-ring commands present only on Haswell. */
static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
	/* Scan-line loads are permitted only for the drm master */
	CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   M  ),
	CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   R  ),
};
235
236 #undef CMD
237 #undef SMI
238 #undef S3D
239 #undef S2D
240 #undef SMFX
241 #undef F
242 #undef S
243 #undef R
244 #undef W
245 #undef B
246 #undef M
247
/*
 * Per-ring table lists: each ring scans the common table plus its own
 * ring-specific table(s); Haswell rings append an extra HSW-only table.
 */
static const struct drm_i915_cmd_table gen7_render_cmds[] = {
	{ common_cmds, ARRAY_SIZE(common_cmds) },
	{ render_cmds, ARRAY_SIZE(render_cmds) },
};

static const struct drm_i915_cmd_table hsw_render_ring_cmds[] = {
	{ common_cmds, ARRAY_SIZE(common_cmds) },
	{ render_cmds, ARRAY_SIZE(render_cmds) },
	{ hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) },
};

static const struct drm_i915_cmd_table gen7_video_cmds[] = {
	{ common_cmds, ARRAY_SIZE(common_cmds) },
	{ video_cmds, ARRAY_SIZE(video_cmds) },
};

static const struct drm_i915_cmd_table hsw_vebox_cmds[] = {
	{ common_cmds, ARRAY_SIZE(common_cmds) },
	{ vecs_cmds, ARRAY_SIZE(vecs_cmds) },
};

static const struct drm_i915_cmd_table gen7_blt_cmds[] = {
	{ common_cmds, ARRAY_SIZE(common_cmds) },
	{ blt_cmds, ARRAY_SIZE(blt_cmds) },
};

static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
	{ common_cmds, ARRAY_SIZE(common_cmds) },
	{ blt_cmds, ARRAY_SIZE(blt_cmds) },
	{ hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
};
279
280 /*
281 * Register whitelists, sorted by increasing register offset.
282 *
283 * Some registers that userspace accesses are 64 bits. The register
284 * access commands only allow 32-bit accesses. Hence, we have to include
285 * entries for both halves of the 64-bit registers.
286 */
287
/*
 * Convenience macro for adding 64-bit registers: expands to two table
 * entries, one for each 32-bit half (the register-access commands only
 * perform 32-bit accesses).
 */
#define REG64(addr) (addr), (addr + sizeof(u32))

/*
 * Registers userspace may access on the render ring, sorted by
 * increasing offset (validate_regs_sorted() checks this at init).
 * Mostly pipeline statistics and transform-feedback counters needed
 * by OpenGL extensions.
 */
static const u32 gen7_render_regs[] = {
	REG64(HS_INVOCATION_COUNT),
	REG64(DS_INVOCATION_COUNT),
	REG64(IA_VERTICES_COUNT),
	REG64(IA_PRIMITIVES_COUNT),
	REG64(VS_INVOCATION_COUNT),
	REG64(GS_INVOCATION_COUNT),
	REG64(GS_PRIMITIVES_COUNT),
	REG64(CL_INVOCATION_COUNT),
	REG64(CL_PRIMITIVES_COUNT),
	REG64(PS_INVOCATION_COUNT),
	REG64(PS_DEPTH_COUNT),
	REG64(GEN7_SO_NUM_PRIMS_WRITTEN(0)),
	REG64(GEN7_SO_NUM_PRIMS_WRITTEN(1)),
	REG64(GEN7_SO_NUM_PRIMS_WRITTEN(2)),
	REG64(GEN7_SO_NUM_PRIMS_WRITTEN(3)),
	GEN7_SO_WRITE_OFFSET(0),
	GEN7_SO_WRITE_OFFSET(1),
	GEN7_SO_WRITE_OFFSET(2),
	GEN7_SO_WRITE_OFFSET(3),
};
312
/* Registers userspace may access on the blitter ring. */
static const u32 gen7_blt_regs[] = {
	BCS_SWCTRL,
};

/* Registers only the drm master may access (IVB/VLV). */
static const u32 ivb_master_regs[] = {
	FORCEWAKE_MT,
	DERRMR,
	GEN7_PIPE_DE_LOAD_SL(PIPE_A),
	GEN7_PIPE_DE_LOAD_SL(PIPE_B),
	GEN7_PIPE_DE_LOAD_SL(PIPE_C),
};

/* Registers only the drm master may access (HSW). */
static const u32 hsw_master_regs[] = {
	FORCEWAKE_MT,
	DERRMR,
};
329
330 #undef REG64
331
332 static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
333 {
334 u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
335 u32 subclient =
336 (cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
337
338 if (client == INSTR_MI_CLIENT)
339 return 0x3F;
340 else if (client == INSTR_RC_CLIENT) {
341 if (subclient == INSTR_MEDIA_SUBCLIENT)
342 return 0xFFFF;
343 else
344 return 0xFF;
345 }
346
347 DRM_DEBUG_DRIVER("CMD: Abnormal rcs cmd length! 0x%08X\n", cmd_header);
348 return 0;
349 }
350
351 static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
352 {
353 u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
354 u32 subclient =
355 (cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
356
357 if (client == INSTR_MI_CLIENT)
358 return 0x3F;
359 else if (client == INSTR_RC_CLIENT) {
360 if (subclient == INSTR_MEDIA_SUBCLIENT)
361 return 0xFFF;
362 else
363 return 0xFF;
364 }
365
366 DRM_DEBUG_DRIVER("CMD: Abnormal bsd cmd length! 0x%08X\n", cmd_header);
367 return 0;
368 }
369
370 static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
371 {
372 u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
373
374 if (client == INSTR_MI_CLIENT)
375 return 0x3F;
376 else if (client == INSTR_BC_CLIENT)
377 return 0xFF;
378
379 DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
380 return 0;
381 }
382
383 static void validate_cmds_sorted(struct intel_ring_buffer *ring)
384 {
385 int i;
386
387 if (!ring->cmd_tables || ring->cmd_table_count == 0)
388 return;
389
390 for (i = 0; i < ring->cmd_table_count; i++) {
391 const struct drm_i915_cmd_table *table = &ring->cmd_tables[i];
392 u32 previous = 0;
393 int j;
394
395 for (j = 0; j < table->count; j++) {
396 const struct drm_i915_cmd_descriptor *desc =
397 &table->table[i];
398 u32 curr = desc->cmd.value & desc->cmd.mask;
399
400 if (curr < previous)
401 DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
402 ring->id, i, j, curr, previous);
403
404 previous = curr;
405 }
406 }
407 }
408
409 static void check_sorted(int ring_id, const u32 *reg_table, int reg_count)
410 {
411 int i;
412 u32 previous = 0;
413
414 for (i = 0; i < reg_count; i++) {
415 u32 curr = reg_table[i];
416
417 if (curr < previous)
418 DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
419 ring_id, i, curr, previous);
420
421 previous = curr;
422 }
423 }
424
425 static void validate_regs_sorted(struct intel_ring_buffer *ring)
426 {
427 check_sorted(ring->id, ring->reg_table, ring->reg_count);
428 check_sorted(ring->id, ring->master_reg_table, ring->master_reg_count);
429 }
430
431 /**
432 * i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
433 * @ring: the ringbuffer to initialize
434 *
435 * Optionally initializes fields related to batch buffer command parsing in the
436 * struct intel_ring_buffer based on whether the platform requires software
437 * command parsing.
438 */
439 void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring)
440 {
441 if (!IS_GEN7(ring->dev))
442 return;
443
444 switch (ring->id) {
445 case RCS:
446 if (IS_HASWELL(ring->dev)) {
447 ring->cmd_tables = hsw_render_ring_cmds;
448 ring->cmd_table_count =
449 ARRAY_SIZE(hsw_render_ring_cmds);
450 } else {
451 ring->cmd_tables = gen7_render_cmds;
452 ring->cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
453 }
454
455 ring->reg_table = gen7_render_regs;
456 ring->reg_count = ARRAY_SIZE(gen7_render_regs);
457
458 if (IS_HASWELL(ring->dev)) {
459 ring->master_reg_table = hsw_master_regs;
460 ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
461 } else {
462 ring->master_reg_table = ivb_master_regs;
463 ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
464 }
465
466 ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
467 break;
468 case VCS:
469 ring->cmd_tables = gen7_video_cmds;
470 ring->cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
471 ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
472 break;
473 case BCS:
474 if (IS_HASWELL(ring->dev)) {
475 ring->cmd_tables = hsw_blt_ring_cmds;
476 ring->cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
477 } else {
478 ring->cmd_tables = gen7_blt_cmds;
479 ring->cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
480 }
481
482 ring->reg_table = gen7_blt_regs;
483 ring->reg_count = ARRAY_SIZE(gen7_blt_regs);
484
485 if (IS_HASWELL(ring->dev)) {
486 ring->master_reg_table = hsw_master_regs;
487 ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
488 } else {
489 ring->master_reg_table = ivb_master_regs;
490 ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
491 }
492
493 ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
494 break;
495 case VECS:
496 ring->cmd_tables = hsw_vebox_cmds;
497 ring->cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
498 /* VECS can use the same length_mask function as VCS */
499 ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
500 break;
501 default:
502 DRM_ERROR("CMD: cmd_parser_init with unknown ring: %d\n",
503 ring->id);
504 BUG();
505 }
506
507 validate_cmds_sorted(ring);
508 validate_regs_sorted(ring);
509 }
510
511 static const struct drm_i915_cmd_descriptor*
512 find_cmd_in_table(const struct drm_i915_cmd_table *table,
513 u32 cmd_header)
514 {
515 int i;
516
517 for (i = 0; i < table->count; i++) {
518 const struct drm_i915_cmd_descriptor *desc = &table->table[i];
519 u32 masked_cmd = desc->cmd.mask & cmd_header;
520 u32 masked_value = desc->cmd.value & desc->cmd.mask;
521
522 if (masked_cmd == masked_value)
523 return desc;
524 }
525
526 return NULL;
527 }
528
529 /*
530 * Returns a pointer to a descriptor for the command specified by cmd_header.
531 *
532 * The caller must supply space for a default descriptor via the default_desc
533 * parameter. If no descriptor for the specified command exists in the ring's
534 * command parser tables, this function fills in default_desc based on the
535 * ring's default length encoding and returns default_desc.
536 */
537 static const struct drm_i915_cmd_descriptor*
538 find_cmd(struct intel_ring_buffer *ring,
539 u32 cmd_header,
540 struct drm_i915_cmd_descriptor *default_desc)
541 {
542 u32 mask;
543 int i;
544
545 for (i = 0; i < ring->cmd_table_count; i++) {
546 const struct drm_i915_cmd_descriptor *desc;
547
548 desc = find_cmd_in_table(&ring->cmd_tables[i], cmd_header);
549 if (desc)
550 return desc;
551 }
552
553 mask = ring->get_cmd_length_mask(cmd_header);
554 if (!mask)
555 return NULL;
556
557 BUG_ON(!default_desc);
558 default_desc->flags = CMD_DESC_SKIP;
559 default_desc->length.mask = mask;
560
561 return default_desc;
562 }
563
564 static bool valid_reg(const u32 *table, int count, u32 addr)
565 {
566 if (table && count != 0) {
567 int i;
568
569 for (i = 0; i < count; i++) {
570 if (table[i] == addr)
571 return true;
572 }
573 }
574
575 return false;
576 }
577
578 static u32 *vmap_batch(struct drm_i915_gem_object *obj)
579 {
580 int i;
581 void *addr = NULL;
582 struct sg_page_iter sg_iter;
583 struct page **pages;
584
585 pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
586 if (pages == NULL) {
587 DRM_DEBUG_DRIVER("Failed to get space for pages\n");
588 goto finish;
589 }
590
591 i = 0;
592 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
593 pages[i] = sg_page_iter_page(&sg_iter);
594 i++;
595 }
596
597 addr = vmap(pages, i, 0, PAGE_KERNEL);
598 if (addr == NULL) {
599 DRM_DEBUG_DRIVER("Failed to vmap pages\n");
600 goto finish;
601 }
602
603 finish:
604 if (pages)
605 drm_free_large(pages);
606 return (u32*)addr;
607 }
608
609 /**
610 * i915_needs_cmd_parser() - should a given ring use software command parsing?
611 * @ring: the ring in question
612 *
613 * Only certain platforms require software batch buffer command parsing, and
614 * only when enabled via module paramter.
615 *
616 * Return: true if the ring requires software command parsing
617 */
618 bool i915_needs_cmd_parser(struct intel_ring_buffer *ring)
619 {
620 /* No command tables indicates a platform without parsing */
621 if (!ring->cmd_tables)
622 return false;
623
624 return (i915.enable_cmd_parser == 1);
625 }
626
/* Standard command length bias: dword count = length field + 2 */
#define LENGTH_BIAS 2

/**
 * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
 * @ring: the ring on which the batch is to execute
 * @batch_obj: the batch buffer in question
 * @batch_start_offset: byte offset in the batch at which execution starts
 * @is_master: is the submitting process the drm master?
 *
 * Parses the specified batch buffer looking for privilege violations as
 * described in the overview.  Walks the batch command by command until
 * MI_BATCH_BUFFER_END, checking each against the ring's descriptor tables:
 * rejected commands, master-only commands, non-whitelisted register
 * accesses, and disallowed bit patterns all fail the batch.
 *
 * Return: non-zero if the parser finds violations or otherwise fails
 */
int i915_parse_cmds(struct intel_ring_buffer *ring,
		    struct drm_i915_gem_object *batch_obj,
		    u32 batch_start_offset,
		    bool is_master)
{
	int ret = 0;
	u32 *cmd, *batch_base, *batch_end;
	struct drm_i915_cmd_descriptor default_desc = { 0 };
	int needs_clflush = 0;

	/* Pin the pages and learn whether the CPU cache must be flushed */
	ret = i915_gem_obj_prepare_shmem_read(batch_obj, &needs_clflush);
	if (ret) {
		DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
		return ret;
	}

	batch_base = vmap_batch(batch_obj);
	if (!batch_base) {
		DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
		i915_gem_object_unpin_pages(batch_obj);
		return -ENOMEM;
	}

	/* Make sure we read what the GPU will execute, not stale cache */
	if (needs_clflush)
		drm_clflush_virt_range((char *)batch_base, batch_obj->base.size);

	cmd = batch_base + (batch_start_offset / sizeof(*cmd));
	/*
	 * NOTE(review): batch_end is cmd + the FULL object size, so when
	 * batch_start_offset != 0 it points past the end of the mapping --
	 * a batch lacking MI_BATCH_BUFFER_END could be read out of bounds
	 * before the cmd >= batch_end check below fires.  TODO confirm and
	 * bound by the actual batch length instead.
	 */
	batch_end = cmd + (batch_obj->base.size / sizeof(*batch_end));

	while (cmd < batch_end) {
		const struct drm_i915_cmd_descriptor *desc;
		u32 length;

		if (*cmd == MI_BATCH_BUFFER_END)
			break;

		desc = find_cmd(ring, *cmd, &default_desc);
		if (!desc) {
			DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
					 *cmd);
			ret = -EINVAL;
			break;
		}

		/* Determine the command's dword count so we can step past it */
		if (desc->flags & CMD_DESC_FIXED)
			length = desc->length.fixed;
		else
			length = ((*cmd & desc->length.mask) + LENGTH_BIAS);

		if ((batch_end - cmd) < length) {
			DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%d batchlen=%td\n",
					 *cmd,
					 length,
					 batch_end - cmd);
			ret = -EINVAL;
			break;
		}

		/* Commands the parser never allows from userspace */
		if (desc->flags & CMD_DESC_REJECT) {
			DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd);
			ret = -EINVAL;
			break;
		}

		/* Commands only the drm master may submit */
		if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
			DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
					 *cmd);
			ret = -EINVAL;
			break;
		}

		/*
		 * Register accesses: the target offset must be on the ring's
		 * whitelist, or on the master whitelist for master processes.
		 */
		if (desc->flags & CMD_DESC_REGISTER) {
			u32 reg_addr = cmd[desc->reg.offset] & desc->reg.mask;

			if (!valid_reg(ring->reg_table,
				       ring->reg_count, reg_addr)) {
				if (!is_master ||
				    !valid_reg(ring->master_reg_table,
					       ring->master_reg_count,
					       reg_addr)) {
					DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
							 reg_addr,
							 *cmd,
							 ring->id);
					ret = -EINVAL;
					break;
				}
			}
		}

		/*
		 * Bitmask checks: each descriptor may constrain specific
		 * dwords of the command to expected values (e.g. forbidding
		 * the notify-interrupt or MMIO-access bits).
		 */
		if (desc->flags & CMD_DESC_BITMASK) {
			int i;

			for (i = 0; i < MAX_CMD_DESC_BITMASKS; i++) {
				u32 dword;

				/* A zero mask terminates the list early */
				if (desc->bits[i].mask == 0)
					break;

				dword = cmd[desc->bits[i].offset] &
					desc->bits[i].mask;

				if (dword != desc->bits[i].expected) {
					DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n",
							 *cmd,
							 desc->bits[i].mask,
							 desc->bits[i].expected,
							 dword, ring->id);
					ret = -EINVAL;
					break;
				}
			}

			if (ret)
				break;
		}

		cmd += length;
	}

	/* Falling off the end without MI_BATCH_BUFFER_END is also a failure */
	if (cmd >= batch_end) {
		DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
		ret = -EINVAL;
	}

	vunmap(batch_base);

	i915_gem_object_unpin_pages(batch_obj);

	return ret;
}
This page took 0.084715 seconds and 5 git commands to generate.