1 /*	$NetBSD: vmwgfx_cmdbuf.c,v 1.7 2022/10/25 23:35:29 riastradh Exp $	*/
2 
3 // SPDX-License-Identifier: GPL-2.0 OR MIT
4 /**************************************************************************
5  *
6  * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a
9  * copy of this software and associated documentation files (the
10  * "Software"), to deal in the Software without restriction, including
11  * without limitation the rights to use, copy, modify, merge, publish,
12  * distribute, sub license, and/or sell copies of the Software, and to
13  * permit persons to whom the Software is furnished to do so, subject to
14  * the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the
17  * next paragraph) shall be included in all copies or substantial portions
18  * of the Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
23  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
24  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
25  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
26  * USE OR OTHER DEALINGS IN THE SOFTWARE.
27  *
28  **************************************************************************/
29 
30 #include <sys/cdefs.h>
31 __KERNEL_RCSID(0, "$NetBSD: vmwgfx_cmdbuf.c,v 1.7 2022/10/25 23:35:29 riastradh Exp $");
32 
33 #include <linux/dmapool.h>
34 #include <linux/pci.h>
35 
36 #include <drm/ttm/ttm_bo_api.h>
37 
38 #include "vmwgfx_drv.h"
39 
40 #include <linux/nbsd-namespace.h>
41 
42 /*
43  * Size of inline command buffers. Try to make sure that the page size is a
44  * multiple of the DMA pool allocation size.
45  */
46 #define VMW_CMDBUF_INLINE_ALIGN 64
47 #define VMW_CMDBUF_INLINE_SIZE \
48 	(1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
49 
50 /**
51  * struct vmw_cmdbuf_context - Command buffer context queues
52  *
53  * @submitted: List of command buffers that have been submitted to the
54  * manager but not yet submitted to hardware.
55  * @hw_submitted: List of command buffers submitted to hardware.
56  * @preempted: List of preempted command buffers.
57  * @num_hw_submitted: Number of buffers currently being processed by hardware.
 * @block_submission: Whether to temporarily block submission of new buffers on
 * this context, used during error recovery.
58  */
59 struct vmw_cmdbuf_context {
60 	struct list_head submitted;
61 	struct list_head hw_submitted;
62 	struct list_head preempted;
63 	unsigned num_hw_submitted;
64 	bool block_submission;
65 };
66 
67 /**
68  * struct vmw_cmdbuf_man - Command buffer manager
69  *
70  * @cur_mutex: Mutex protecting the command buffer used for incremental small
71  * kernel command submissions, @cur.
72  * @space_mutex: Mutex to protect against starvation when we allocate
73  * main pool buffer space.
74  * @error_mutex: Mutex to serialize the work queue error handling.
75  * Note this is not needed if the same workqueue handler
76  * can't race with itself...
77  * @work: A struct work_struct implementing command buffer error handling.
78  * Immutable.
79  * @dev_priv: Pointer to the device private struct. Immutable.
80  * @ctx: Array of command buffer context queues. The queues and the context
81  * data is protected by @lock.
82  * @error: List of command buffers that have caused device errors.
83  * Protected by @lock.
84  * @mm: Range manager for the command buffer space. Manager allocations and
85  * frees are protected by @lock.
86  * @cmd_space: Buffer object for the command buffer space, unless we were
87  * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
88  * @map_obj: Mapping state for @cmd_space. Immutable.
89  * @map: Pointer to command buffer space. May be a mapped buffer object or
90  * a contiguous coherent DMA memory allocation. Immutable.
91  * @cur: Command buffer for small kernel command submissions. Protected by
92  * the @cur_mutex.
93  * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
94  * @default_size: Default size for the @cur command buffer. Immutable.
95  * @max_hw_submitted: Max number of in-flight command buffers the device can
96  * handle. Immutable.
97  * @lock: Spinlock protecting command submission queues.
98  * @headers: Pool of DMA memory for device command buffer headers.
99  * Internal protection.
100  * @dheaders: Pool of DMA memory for device command buffer headers with trailing
101  * space for inline data. Internal protection.
102  * @alloc_queue: Wait queue for processes waiting to allocate command buffer
103  * space.
104  * @idle_queue: Wait queue for processes waiting for command buffer idle.
105  * @irq_on: Whether the process function has requested irq to be turned on.
106  * Protected by @lock.
107  * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
108  * allocation. Immutable.
109  * @has_pool: Has a large pool of DMA memory which allows larger allocations.
110  * Typically this is false only during bootstrap.
111  * @handle: DMA address handle for the command buffer space if @using_mob is
112  * false. Immutable.
113  * @size: The size of the command buffer space. Immutable.
114  * @num_contexts: Number of contexts actually enabled.
115  */
116 struct vmw_cmdbuf_man {
117 	struct mutex cur_mutex;
118 	struct mutex space_mutex;
119 	struct mutex error_mutex;
120 	struct work_struct work;
121 	struct vmw_private *dev_priv;
122 	struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
123 	struct list_head error;
124 	struct drm_mm mm;
125 	struct ttm_buffer_object *cmd_space;
126 	struct ttm_bo_kmap_obj map_obj;
127 	u8 *map;
128 	struct vmw_cmdbuf_header *cur;
129 	size_t cur_pos;
130 	size_t default_size;
131 	unsigned max_hw_submitted;
132 	spinlock_t lock;
133 	struct dma_pool *headers;
134 	struct dma_pool *dheaders;
135 	drm_waitqueue_t alloc_queue;
136 	drm_waitqueue_t idle_queue;
137 	bool irq_on;
138 	bool using_mob;
139 	bool has_pool;
140 #ifdef __NetBSD__
141 	bus_dmamap_t dmamap;
142 	bus_dma_segment_t dmaseg;
143 #endif
144 	dma_addr_t handle;
145 	size_t size;
146 	u32 num_contexts;
147 };
148 
149 /**
150  * struct vmw_cmdbuf_header - Command buffer metadata
151  *
152  * @man: The command buffer manager.
153  * @cb_header: Device command buffer header, allocated from a DMA pool.
154  * @cb_context: The device command buffer context.
155  * @list: List head for attaching to the manager lists.
156  * @node: The range manager node.
157  * @handle: The DMA address of @cb_header. Handed to the device on command
158  * buffer submission.
159  * @cmd: Pointer to the command buffer space of this buffer.
160  * @size: Size of the command buffer space of this buffer.
161  * @reserved: Reserved space of this buffer.
162  * @inline_space: Whether inline command buffer space is used.
163  */
164 struct vmw_cmdbuf_header {
165 	struct vmw_cmdbuf_man *man;
166 	SVGACBHeader *cb_header;
167 	SVGACBContext cb_context;
168 	struct list_head list;
169 	struct drm_mm_node node;
170 	dma_addr_t handle;
171 	u8 *cmd;
172 	size_t size;
173 	size_t reserved;
174 	bool inline_space;
175 };
176 
177 /**
178  * struct vmw_cmdbuf_dheader - Device command buffer header with inline
179  * command buffer space.
180  *
181  * @cb_header: Device command buffer header.
182  * @cmd: Inline command buffer space.
183  */
184 struct vmw_cmdbuf_dheader {
185 	SVGACBHeader cb_header;
186 	u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
187 };
188 
189 /**
190  * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
191  *
192  * @page_size: Size of requested command buffer space in pages.
193  * @node: Pointer to the range manager node.
194  * @done: True if this allocation has succeeded.
195  */
196 struct vmw_cmdbuf_alloc_info {
197 	size_t page_size;
198 	struct drm_mm_node *node;
199 	bool done;
200 };
201 
202 /* Loop over each context in the command buffer manager. */
203 #define for_each_cmdbuf_ctx(_man, _i, _ctx)				\
204 	for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < (_man)->num_contexts; \
205 	     ++(_i), ++(_ctx))
206 
207 static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
208 				bool enable);
209 static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context);
210 
211 /**
212  * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
213  *
214  * @man: The range manager.
215  * @interruptible: Whether to wait interruptible when locking.
216  */
217 static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
218 {
219 	if (interruptible) {
220 		if (mutex_lock_interruptible(&man->cur_mutex))
221 			return -ERESTARTSYS;
222 	} else {
223 		mutex_lock(&man->cur_mutex);
224 	}
225 
226 	return 0;
227 }
228 
229 /**
230  * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
231  *
232  * @man: The range manager.
233  */
234 static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
235 {
236 	mutex_unlock(&man->cur_mutex);
237 }
238 
239 /**
240  * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
241  * been used for the device context with inline command buffers.
242  * Need not be called locked.
243  *
244  * @header: Pointer to the header to free.
245  */
246 static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
247 {
248 	struct vmw_cmdbuf_dheader *dheader;
249 
250 	if (WARN_ON_ONCE(!header->inline_space))
251 		return;
252 
253 	dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
254 			       cb_header);
255 	dma_pool_free(header->man->dheaders, dheader, header->handle);
256 	kfree(header);
257 }
258 
259 /**
260  * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
261  * associated structures.
262  *
263  * @header: Pointer to the header to free.
264  *
265  * For internal use. Must be called with man::lock held.
266  */
267 static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
268 {
269 	struct vmw_cmdbuf_man *man = header->man;
270 
271 	lockdep_assert_held_once(&man->lock);
272 
273 	if (header->inline_space) {
274 		vmw_cmdbuf_header_inline_free(header);
275 		return;
276 	}
277 
278 	drm_mm_remove_node(&header->node);
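	/*
	 * Removing the node frees command buffer space; wake anyone
	 * sleeping in vmw_cmdbuf_alloc_space() waiting for space.
	 */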
279 	DRM_SPIN_WAKEUP_ALL(&man->alloc_queue, &man->lock); /* XXX */
280 	if (header->cb_header)
281 		dma_pool_free(man->headers, header->cb_header,
282 			      header->handle);
283 	kfree(header);
284 }
285 
286 /**
287  * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
288  * associated structures.
289  *
290  * @header: Pointer to the header to free.
291  */
292 void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
293 {
294 	struct vmw_cmdbuf_man *man = header->man;
295 
296 	/* Avoid locking if inline_space */
297 	if (header->inline_space) {
298 		vmw_cmdbuf_header_inline_free(header);
299 		return;
300 	}
301 	spin_lock(&man->lock);
302 	__vmw_cmdbuf_header_free(header);
303 	spin_unlock(&man->lock);
304 }
305 
306 
307 /**
308  * vmw_cmdbuf_header_submit: Submit a command buffer to hardware.
309  *
310  * @header: The header of the buffer to submit.
311  */
312 static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
313 {
314 	struct vmw_cmdbuf_man *man = header->man;
315 	u32 val;
316 
317 	val = upper_32_bits(header->handle);
318 	vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);
319 
320 	val = lower_32_bits(header->handle);
321 	val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
322 	vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);
323 
324 	return header->cb_header->status;
325 }
326 
327 /**
328  * vmw_cmdbuf_ctx_init: Initialize a command buffer context.
329  *
330  * @ctx: The command buffer context to initialize
331  */
332 static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
333 {
334 	INIT_LIST_HEAD(&ctx->hw_submitted);
335 	INIT_LIST_HEAD(&ctx->submitted);
336 	INIT_LIST_HEAD(&ctx->preempted);
337 	ctx->num_hw_submitted = 0;
338 }
339 
340 /**
341  * vmw_cmdbuf_ctx_submit: Submit command buffers from a command buffer
342  * context.
343  *
344  * @man: The command buffer manager.
345  * @ctx: The command buffer context.
346  *
347  * Submits command buffers to hardware until there are no more command
348  * buffers to submit or the hardware can't handle more command buffers.
349  */
350 static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
351 				  struct vmw_cmdbuf_context *ctx)
352 {
353 	while (ctx->num_hw_submitted < man->max_hw_submitted &&
354 	       !list_empty(&ctx->submitted) &&
355 	       !ctx->block_submission) {
356 		struct vmw_cmdbuf_header *entry;
357 		SVGACBStatus status;
358 
359 		entry = list_first_entry(&ctx->submitted,
360 					 struct vmw_cmdbuf_header,
361 					 list);
362 
363 		status = vmw_cmdbuf_header_submit(entry);
364 
365 		/* This should never happen */
366 		if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
367 			entry->cb_header->status = SVGA_CB_STATUS_NONE;
368 			break;
369 		}
370 
371 		list_del(&entry->list);
372 		list_add_tail(&entry->list, &ctx->hw_submitted);
373 		ctx->num_hw_submitted++;
374 	}
375 
376 }
377 
378 /**
379  * vmw_cmdbuf_ctx_process: Process a command buffer context.
380  *
381  * @man: The command buffer manager.
382  * @ctx: The command buffer context.
383  *
384  * Submit command buffers to hardware if possible, and process finished
385  * buffers, typically freeing them, but taking appropriate action on
386  * preemption or error. Wake up waiters if appropriate.
387  */
388 static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
389 				   struct vmw_cmdbuf_context *ctx,
390 				   int *notempty)
391 {
392 	struct vmw_cmdbuf_header *entry, *next;
393 
394 	assert_spin_locked(&man->lock);
395 
396 	vmw_cmdbuf_ctx_submit(man, ctx);
397 
398 	list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
399 		SVGACBStatus status = entry->cb_header->status;
400 
401 		if (status == SVGA_CB_STATUS_NONE)
402 			break;
403 
404 		list_del(&entry->list);
405 		DRM_SPIN_WAKEUP_ONE(&man->idle_queue, &man->lock);
406 		ctx->num_hw_submitted--;
407 		switch (status) {
408 		case SVGA_CB_STATUS_COMPLETED:
409 			__vmw_cmdbuf_header_free(entry);
410 			break;
411 		case SVGA_CB_STATUS_COMMAND_ERROR:
412 			WARN_ONCE(true, "Command buffer error.\n");
413 			entry->cb_header->status = SVGA_CB_STATUS_NONE;
414 			list_add_tail(&entry->list, &man->error);
415 			schedule_work(&man->work);
416 			break;
417 		case SVGA_CB_STATUS_PREEMPTED:
418 			entry->cb_header->status = SVGA_CB_STATUS_NONE;
419 			list_add_tail(&entry->list, &ctx->preempted);
420 			break;
421 		case SVGA_CB_STATUS_CB_HEADER_ERROR:
422 			WARN_ONCE(true, "Command buffer header error.\n");
423 			__vmw_cmdbuf_header_free(entry);
424 			break;
425 		default:
426 			WARN_ONCE(true, "Undefined command buffer status.\n");
427 			__vmw_cmdbuf_header_free(entry);
428 			break;
429 		}
430 	}
431 
432 	vmw_cmdbuf_ctx_submit(man, ctx);
433 	if (!list_empty(&ctx->submitted))
434 		(*notempty)++;
435 }
436 
437 /**
438  * vmw_cmdbuf_man_process - Process all command buffer contexts and
439  * switch on and off irqs as appropriate.
440  *
441  * @man: The command buffer manager.
442  *
443  * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
444  * command buffers left that are not submitted to hardware, make sure
445  * IRQ handling is turned on. Otherwise, make sure it's turned off.
446  */
447 static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
448 {
449 	int notempty;
450 	struct vmw_cmdbuf_context *ctx;
451 	int i;
452 
453 	assert_spin_locked(&man->lock);
454 
455 retry:
456 	notempty = 0;
457 	for_each_cmdbuf_ctx(man, i, ctx)
458 		vmw_cmdbuf_ctx_process(man, ctx, &notempty);
459 
460 	if (man->irq_on && !notempty) {
461 		vmw_generic_waiter_remove(man->dev_priv,
462 					  SVGA_IRQFLAG_COMMAND_BUFFER,
463 					  &man->dev_priv->cmdbuf_waiters);
464 		man->irq_on = false;
465 	} else if (!man->irq_on && notempty) {
466 		vmw_generic_waiter_add(man->dev_priv,
467 				       SVGA_IRQFLAG_COMMAND_BUFFER,
468 				       &man->dev_priv->cmdbuf_waiters);
469 		man->irq_on = true;
470 
471 		/* Rerun in case we just missed an irq. */
472 		goto retry;
473 	}
474 }
475 
476 /**
477  * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
478  * command buffer context
479  *
480  * @man: The command buffer manager.
481  * @header: The header of the buffer to submit.
482  * @cb_context: The command buffer context to use.
483  *
484  * This function adds @header to the "submitted" queue of the command
485  * buffer context identified by @cb_context. It then calls the command buffer
486  * manager processing to potentially submit the buffer to hardware.
487  * @man->lock needs to be held when calling this function.
488  */
489 static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
490 			       struct vmw_cmdbuf_header *header,
491 			       SVGACBContext cb_context)
492 {
493 	if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
494 		header->cb_header->dxContext = 0;
495 	header->cb_context = cb_context;
496 	list_add_tail(&header->list, &man->ctx[cb_context].submitted);
497 
498 	vmw_cmdbuf_man_process(man);
499 }
500 
501 /**
502  * vmw_cmdbuf_irqthread - The main part of the command buffer interrupt
503  * handler implemented as a threaded irq task.
504  *
505  * @man: Pointer to the command buffer manager.
506  *
507  * The bottom half of the interrupt handler simply calls into the
508  * command buffer processor to free finished buffers and submit any
509  * queued buffers to hardware.
510  */
511 void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man)
512 {
513 	spin_lock(&man->lock);
514 	vmw_cmdbuf_man_process(man);
515 	spin_unlock(&man->lock);
516 }
517 
518 /**
519  * vmw_cmdbuf_work_func - The deferred work function that handles
520  * command buffer errors.
521  *
522  * @work: The work func closure argument.
523  *
524  * Restarting the command buffer context after an error requires process
525  * context, so it is deferred to this work function.
526  */
527 static void vmw_cmdbuf_work_func(struct work_struct *work)
528 {
529 	struct vmw_cmdbuf_man *man =
530 		container_of(work, struct vmw_cmdbuf_man, work);
531 	struct vmw_cmdbuf_header *entry, *next;
532 	uint32_t dummy;
533 	bool send_fence = false;
534 	struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
535 	int i;
536 	struct vmw_cmdbuf_context *ctx;
537 	bool global_block = false;
538 
539 	for_each_cmdbuf_ctx(man, i, ctx)
540 		INIT_LIST_HEAD(&restart_head[i]);
541 
542 	mutex_lock(&man->error_mutex);
543 	spin_lock(&man->lock);
544 	list_for_each_entry_safe(entry, next, &man->error, list) {
545 		SVGACBHeader *cb_hdr = entry->cb_header;
546 		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *)
547 			(entry->cmd + cb_hdr->errorOffset);
548 		u32 error_cmd_size, new_start_offset;
549 		const char *cmd_name;
550 
551 		list_del_init(&entry->list);
552 		global_block = true;
553 
554 		if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
555 			VMW_DEBUG_USER("Unknown command causing device error.\n");
556 			VMW_DEBUG_USER("Command buffer offset is %lu\n",
557 				       (unsigned long) cb_hdr->errorOffset);
558 			__vmw_cmdbuf_header_free(entry);
559 			send_fence = true;
560 			continue;
561 		}
562 
563 		VMW_DEBUG_USER("Command \"%s\" causing device error.\n",
564 			       cmd_name);
565 		VMW_DEBUG_USER("Command buffer offset is %lu\n",
566 			       (unsigned long) cb_hdr->errorOffset);
567 		VMW_DEBUG_USER("Command size is %lu\n",
568 			       (unsigned long) error_cmd_size);
569 
570 		new_start_offset = cb_hdr->errorOffset + error_cmd_size;
571 
572 		if (new_start_offset >= cb_hdr->length) {
573 			__vmw_cmdbuf_header_free(entry);
574 			send_fence = true;
575 			continue;
576 		}
577 
578 		if (man->using_mob)
579 			cb_hdr->ptr.mob.mobOffset += new_start_offset;
580 		else
581 			cb_hdr->ptr.pa += (u64) new_start_offset;
582 
583 		entry->cmd += new_start_offset;
584 		cb_hdr->length -= new_start_offset;
585 		cb_hdr->errorOffset = 0;
586 		cb_hdr->offset = 0;
587 
588 		list_add_tail(&entry->list, &restart_head[entry->cb_context]);
589 	}
590 
591 	for_each_cmdbuf_ctx(man, i, ctx)
592 		man->ctx[i].block_submission = true;
593 
594 	spin_unlock(&man->lock);
595 
596 	/* Preempt all contexts */
597 	if (global_block && vmw_cmdbuf_preempt(man, 0))
598 		DRM_ERROR("Failed preempting command buffer contexts\n");
599 
600 	spin_lock(&man->lock);
601 	for_each_cmdbuf_ctx(man, i, ctx) {
602 		/* Move preempted command buffers to the preempted queue. */
603 		vmw_cmdbuf_ctx_process(man, ctx, &dummy);
604 
605 		/*
606 		 * Add the preempted queue after the command buffer
607 		 * that caused an error.
608 		 */
609 		list_splice_init(&ctx->preempted, restart_head[i].prev);
610 
611 		/*
612 		 * Finally add all command buffers first in the submitted
613 		 * queue, to rerun them.
614 		 */
615 
616 		ctx->block_submission = false;
617 		list_splice_init(&restart_head[i], &ctx->submitted);
618 	}
619 
620 	vmw_cmdbuf_man_process(man);
621 	spin_unlock(&man->lock);
622 
623 	if (global_block && vmw_cmdbuf_startstop(man, 0, true))
624 		DRM_ERROR("Failed restarting command buffer contexts\n");
625 
626 	/* Send a new fence in case one was removed */
627 	if (send_fence) {
628 		vmw_fifo_send_fence(man->dev_priv, &dummy);
629 		spin_lock(&man->lock);
630 		DRM_SPIN_WAKEUP_ALL(&man->idle_queue, &man->lock);
631 		spin_unlock(&man->lock);
632 	}
633 
634 	mutex_unlock(&man->error_mutex);
635 }
636 
637 /**
638  * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
639  *
640  * @man: The command buffer manager.
641  * @check_preempted: Check also the preempted queue for pending command buffers.
642  *
643  */
644 static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
645 				bool check_preempted)
646 {
647 	struct vmw_cmdbuf_context *ctx;
648 	bool idle = false;
649 	int i;
650 
651 	assert_spin_locked(&man->lock);
652 
653 	vmw_cmdbuf_man_process(man);
654 	for_each_cmdbuf_ctx(man, i, ctx) {
655 		if (!list_empty(&ctx->submitted) ||
656 		    !list_empty(&ctx->hw_submitted) ||
657 		    (check_preempted && !list_empty(&ctx->preempted)))
658 			goto out;
659 	}
660 
661 	idle = list_empty(&man->error);
662 
663 out:
664 	return idle;
665 }
666 
667 /**
668  * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
669  * command submissions
670  *
671  * @man: The command buffer manager.
672  *
673  * Flushes the current command buffer without allocating a new one. A new one
674  * is automatically allocated when needed. Call with @man->cur_mutex held.
675  */
676 static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
677 {
678 	struct vmw_cmdbuf_header *cur = man->cur;
679 
680 	lockdep_assert_held_once(&man->cur_mutex);
681 
682 	if (!cur)
683 		return;
684 
685 	spin_lock(&man->lock);
686 	if (man->cur_pos == 0) {
687 		__vmw_cmdbuf_header_free(cur);
688 		goto out_unlock;
689 	}
690 
691 	man->cur->cb_header->length = man->cur_pos;
692 	vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
693 out_unlock:
694 	spin_unlock(&man->lock);
695 	man->cur = NULL;
696 	man->cur_pos = 0;
697 }
698 
699 /**
700  * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
701  * command submissions
702  *
703  * @man: The command buffer manager.
704  * @interruptible: Whether to sleep interruptible when sleeping.
705  *
706  * Flushes the current command buffer without allocating a new one. A new one
707  * is automatically allocated when needed.
708  */
709 int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
710 			 bool interruptible)
711 {
712 	int ret = vmw_cmdbuf_cur_lock(man, interruptible);
713 
714 	if (ret)
715 		return ret;
716 
717 	__vmw_cmdbuf_cur_flush(man);
718 	vmw_cmdbuf_cur_unlock(man);
719 
720 	return 0;
721 }
722 
723 /**
724  * vmw_cmdbuf_idle - Wait for command buffer manager idle.
725  *
726  * @man: The command buffer manager.
727  * @interruptible: Sleep interruptible while waiting.
728  * @timeout: Time out after this many ticks.
729  *
730  * Wait until the command buffer manager has processed all command buffers,
731  * or until a timeout occurs. If a timeout occurs, the function will return
732  * -EBUSY.
733  */
734 int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
735 		    unsigned long timeout)
736 {
737 	int ret;
738 
739 	ret = vmw_cmdbuf_cur_flush(man, interruptible);
740 	spin_lock(&man->lock);
741 	vmw_generic_waiter_add(man->dev_priv,
742 			       SVGA_IRQFLAG_COMMAND_BUFFER,
743 			       &man->dev_priv->cmdbuf_waiters);
744 	if (interruptible) {
745 		DRM_SPIN_TIMED_WAIT_UNTIL(ret, &man->idle_queue, &man->lock,
746 		    timeout, vmw_cmdbuf_man_idle(man, true));
747 	} else {
748 		DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(ret, &man->idle_queue,
749 		    &man->lock,
750 		    timeout, vmw_cmdbuf_man_idle(man, true));
751 	}
752 	vmw_generic_waiter_remove(man->dev_priv,
753 				  SVGA_IRQFLAG_COMMAND_BUFFER,
754 				  &man->dev_priv->cmdbuf_waiters);
755 	if (ret == 0) {
756 		if (!vmw_cmdbuf_man_idle(man, true))
757 			ret = -EBUSY;
758 		else
759 			ret = 0;
760 	}
761 	spin_unlock(&man->lock);
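	/*
	 * A positive return from the timed wait means the manager went
	 * idle before the timeout expired; report success.
	 */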
762 	if (ret > 0)
763 		ret = 0;
764 
765 	return ret;
766 }
767 
768 /**
769  * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
770  *
771  * @man: The command buffer manager.
772  * @info: Allocation info. Will hold the size on entry and allocated mm node
773  * on successful return.
774  *
775  * Try to allocate buffer space from the main pool. Returns true if the
776  * allocation succeeded; otherwise @info->done remains false and the caller may retry.
777  */
778 static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
779 				 struct vmw_cmdbuf_alloc_info *info)
780 {
781 	int ret;
782 
783 	if (info->done)
784 		return true;
785 
786 	memset(info->node, 0, sizeof(*info->node));
787 	spin_lock(&man->lock);
788 	ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
789 	if (ret) {
790 		vmw_cmdbuf_man_process(man);
791 		ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
792 	}
793 
794 	spin_unlock(&man->lock);
795 	info->done = !ret;
796 
797 	return info->done;
798 }
799 
800 /**
801  * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
802  *
803  * @man: The command buffer manager.
804  * @node: Pointer to pre-allocated range-manager node.
805  * @size: The size of the allocation.
806  * @interruptible: Whether to sleep interruptible while waiting for space.
807  *
808  * This function allocates buffer space from the main pool, and if there is
809  * no space currently available, it turns on IRQ handling and sleeps waiting for it to
810  * become available.
811  */
812 static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
813 				  struct drm_mm_node *node,
814 				  size_t size,
815 				  bool interruptible)
816 {
817 	struct vmw_cmdbuf_alloc_info info;
818 
819 	info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
820 	info.node = node;
821 	info.done = false;
822 
823 	/*
824 	 * To prevent starvation of large requests, only one allocating call
825 	 * at a time waiting for space.
826 	 */
827 	if (interruptible) {
828 		if (mutex_lock_interruptible(&man->space_mutex))
829 			return -ERESTARTSYS;
830 	} else {
831 		mutex_lock(&man->space_mutex);
832 	}
833 	spin_lock(&man->lock);
834 
835 	/* Try to allocate space without waiting. */
836 	if (vmw_cmdbuf_try_alloc(man, &info))
837 		goto out_unlock;
838 
839 	vmw_generic_waiter_add(man->dev_priv,
840 			       SVGA_IRQFLAG_COMMAND_BUFFER,
841 			       &man->dev_priv->cmdbuf_waiters);
842 
843 	if (interruptible) {
844 		int ret;
845 
846 		DRM_SPIN_WAIT_UNTIL(ret, &man->alloc_queue, &man->lock,
847 		    vmw_cmdbuf_try_alloc(man, &info));
848 		if (ret) {
849 			vmw_generic_waiter_remove
850 				(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
851 				 &man->dev_priv->cmdbuf_waiters);
852 			spin_unlock(&man->lock);
853 			mutex_unlock(&man->space_mutex);
854 			return ret;
855 		}
856 	} else {
857 		int ret;
858 
859 		DRM_SPIN_WAIT_NOINTR_UNTIL(ret, &man->alloc_queue, &man->lock,
860 		    vmw_cmdbuf_try_alloc(man, &info));
861 		BUG_ON(ret);
862 	}
863 	vmw_generic_waiter_remove(man->dev_priv,
864 				  SVGA_IRQFLAG_COMMAND_BUFFER,
865 				  &man->dev_priv->cmdbuf_waiters);
866 
867 out_unlock:
868 	spin_unlock(&man->lock);
869 	mutex_unlock(&man->space_mutex);
870 
871 	return 0;
872 }
873 
874 /**
875  * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
876  * space from the main pool.
877  *
878  * @man: The command buffer manager.
879  * @header: Pointer to the header to set up.
880  * @size: The requested size of the buffer space.
881  * @interruptible: Whether to sleep interruptible while waiting for space.
882  */
883 static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
884 				 struct vmw_cmdbuf_header *header,
885 				 size_t size,
886 				 bool interruptible)
887 {
888 	SVGACBHeader *cb_hdr;
889 	size_t offset;
890 	int ret;
891 
892 	if (!man->has_pool)
893 		return -ENOMEM;
894 
895 	ret = vmw_cmdbuf_alloc_space(man, &header->node,  size, interruptible);
896 
897 	if (ret)
898 		return ret;
899 
900 	header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
901 					    &header->handle);
902 	if (!header->cb_header) {
903 		ret = -ENOMEM;
904 		goto out_no_cb_header;
905 	}
906 
907 	header->size = header->node.size << PAGE_SHIFT;
908 	cb_hdr = header->cb_header;
909 	offset = header->node.start << PAGE_SHIFT;
910 	header->cmd = man->map + offset;
911 	if (man->using_mob) {
912 		cb_hdr->flags = SVGA_CB_FLAG_MOB;
913 		cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
914 		cb_hdr->ptr.mob.mobOffset = offset;
915 	} else {
916 		cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
917 	}
918 
919 	return 0;
920 
921 out_no_cb_header:
922 	spin_lock(&man->lock);
923 	drm_mm_remove_node(&header->node);
924 	spin_unlock(&man->lock);
925 
926 	return ret;
927 }
928 
929 /**
930  * vmw_cmdbuf_space_inline - Set up a command buffer header with
931  * inline command buffer space.
932  *
933  * @man: The command buffer manager.
934  * @header: Pointer to the header to set up.
935  * @size: The requested size of the buffer space.
936  */
937 static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
938 				   struct vmw_cmdbuf_header *header,
939 				   int size)
940 {
941 	struct vmw_cmdbuf_dheader *dheader;
942 	SVGACBHeader *cb_hdr;
943 
944 	if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
945 		return -ENOMEM;
946 
947 	dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
948 				  &header->handle);
949 	if (!dheader)
950 		return -ENOMEM;
951 
952 	header->inline_space = true;
953 	header->size = VMW_CMDBUF_INLINE_SIZE;
954 	cb_hdr = &dheader->cb_header;
955 	header->cb_header = cb_hdr;
956 	header->cmd = dheader->cmd;
957 	cb_hdr->status = SVGA_CB_STATUS_NONE;
958 	cb_hdr->flags = SVGA_CB_FLAG_NONE;
959 	cb_hdr->ptr.pa = (u64)header->handle +
960 		(u64)offsetof(struct vmw_cmdbuf_dheader, cmd);
961 
962 	return 0;
963 }
964 
965 /**
966  * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
967  * command buffer space.
968  *
969  * @man: The command buffer manager.
970  * @size: The requested size of the buffer space.
971  * @interruptible: Whether to sleep interruptible while waiting for space.
972  * @p_header: points to a header pointer to populate on successful return.
973  *
974  * Returns a pointer to command buffer space if successful. Otherwise
975  * returns an error pointer. The header pointer returned in @p_header should
976  * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
977  */
978 void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
979 		       size_t size, bool interruptible,
980 		       struct vmw_cmdbuf_header **p_header)
981 {
982 	struct vmw_cmdbuf_header *header;
983 	int ret = 0;
984 
985 	*p_header = NULL;
986 
987 	header = kzalloc(sizeof(*header), GFP_KERNEL);
988 	if (!header)
989 		return ERR_PTR(-ENOMEM);
990 
991 	if (size <= VMW_CMDBUF_INLINE_SIZE)
992 		ret = vmw_cmdbuf_space_inline(man, header, size);
993 	else
994 		ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);
995 
996 	if (ret) {
997 		kfree(header);
998 		return ERR_PTR(ret);
999 	}
1000 
1001 	header->man = man;
1002 	INIT_LIST_HEAD(&header->list);
1003 	header->cb_header->status = SVGA_CB_STATUS_NONE;
1004 	*p_header = header;
1005 
1006 	return header->cmd;
1007 }
1008 
1009 /**
1010  * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
1011  * command buffer.
1012  *
1013  * @man: The command buffer manager.
1014  * @size: The requested size of the commands.
1015  * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
1016  * @interruptible: Whether to sleep interruptible while waiting for space.
1017  *
1018  * Returns a pointer to command buffer space if successful. Otherwise
1019  * returns an error pointer.
1020  */
1021 static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
1022 				    size_t size,
1023 				    int ctx_id,
1024 				    bool interruptible)
1025 {
1026 	struct vmw_cmdbuf_header *cur;
1027 	void *ret;
1028 
1029 	if (vmw_cmdbuf_cur_lock(man, interruptible))
1030 		return ERR_PTR(-ERESTARTSYS);
1031 
1032 	cur = man->cur;
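	/* Flush the current buffer if the new commands won't fit or target a different DX context. */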
1033 	if (cur && (size + man->cur_pos > cur->size ||
1034 		    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
1035 		     ctx_id != cur->cb_header->dxContext)))
1036 		__vmw_cmdbuf_cur_flush(man);
1037 
1038 	if (!man->cur) {
1039 		ret = vmw_cmdbuf_alloc(man,
1040 				       max_t(size_t, size, man->default_size),
1041 				       interruptible, &man->cur);
1042 		if (IS_ERR(ret)) {
1043 			vmw_cmdbuf_cur_unlock(man);
1044 			return ret;
1045 		}
1046 
1047 		cur = man->cur;
1048 	}
1049 
1050 	if (ctx_id != SVGA3D_INVALID_ID) {
1051 		cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
1052 		cur->cb_header->dxContext = ctx_id;
1053 	}
1054 
1055 	cur->reserved = size;
1056 
1057 	return (void *) (man->cur->cmd + man->cur_pos);
1058 }
1059 
1060 /**
1061  * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
1062  *
1063  * @man: The command buffer manager.
1064  * @size: The size of the commands actually written.
1065  * @flush: Whether to flush the command buffer immediately.
1066  */
1067 static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
1068 				  size_t size, bool flush)
1069 {
1070 	struct vmw_cmdbuf_header *cur = man->cur;
1071 
1072 	lockdep_assert_held_once(&man->cur_mutex);
1073 
1074 	WARN_ON(size > cur->reserved);
1075 	man->cur_pos += size;
1076 	if (!size)
1077 		cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
1078 	if (flush)
1079 		__vmw_cmdbuf_cur_flush(man);
1080 	vmw_cmdbuf_cur_unlock(man);
1081 }
1082 
1083 /**
1084  * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
1085  *
1086  * @man: The command buffer manager.
1087  * @size: The requested size of the commands.
1088  * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
1089  * @interruptible: Whether to sleep interruptible while waiting for space.
1090  * @header: Header of the command buffer. NULL if the current command buffer
1091  * should be used.
1092  *
1093  * Returns a pointer to command buffer space if successful. Otherwise
1094  * returns an error pointer.
1095  */
1096 void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
1097 			 int ctx_id, bool interruptible,
1098 			 struct vmw_cmdbuf_header *header)
1099 {
1100 	if (!header)
1101 		return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);
1102 
1103 	if (size > header->size)
1104 		return ERR_PTR(-EINVAL);
1105 
1106 	if (ctx_id != SVGA3D_INVALID_ID) {
1107 		header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
1108 		header->cb_header->dxContext = ctx_id;
1109 	}
1110 
1111 	header->reserved = size;
1112 	return header->cmd;
1113 }
1114 
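/*
 * Illustrative caller sequence (a sketch, not taken from an actual caller):
 *
 *	cmd = vmw_cmdbuf_reserve(man, size, SVGA3D_INVALID_ID, true, NULL);
 *	if (!IS_ERR(cmd)) {
 *		... fill in size bytes of SVGA commands at cmd ...
 *		vmw_cmdbuf_commit(man, size, NULL, false);
 *	}
 */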
1115 /**
1116  * vmw_cmdbuf_commit - Commit commands in a command buffer.
1117  *
1118  * @man: The command buffer manager.
1119  * @size: The size of the commands actually written.
1120  * @header: Header of the command buffer. NULL if the current command buffer
1121  * should be used.
1122  * @flush: Whether to flush the command buffer immediately.
1123  */
1124 void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
1125 		       struct vmw_cmdbuf_header *header, bool flush)
1126 {
1127 	if (!header) {
1128 		vmw_cmdbuf_commit_cur(man, size, flush);
1129 		return;
1130 	}
1131 
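	/*
	 * Committing a dedicated header: flush any current buffer, then make
	 * @header the current buffer so it is submitted via the cur path,
	 * either immediately (@flush) or on a later flush.
	 */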
1132 	(void) vmw_cmdbuf_cur_lock(man, false);
1133 	__vmw_cmdbuf_cur_flush(man);
1134 	WARN_ON(size > header->reserved);
1135 	man->cur = header;
1136 	man->cur_pos = size;
1137 	if (!size)
1138 		header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
1139 	if (flush)
1140 		__vmw_cmdbuf_cur_flush(man);
1141 	vmw_cmdbuf_cur_unlock(man);
1142 }
1143 
1144 
1145 /**
1146  * vmw_cmdbuf_send_device_command - Send a command through the device context.
1147  *
1148  * @man: The command buffer manager.
1149  * @command: Pointer to the command to send.
1150  * @size: Size of the command.
1151  *
1152  * Synchronously sends a device context command.
1153  */
1154 static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
1155 					  const void *command,
1156 					  size_t size)
1157 {
1158 	struct vmw_cmdbuf_header *header;
1159 	int status;
1160 	void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);
1161 
1162 	if (IS_ERR(cmd))
1163 		return PTR_ERR(cmd);
1164 
1165 	memcpy(cmd, command, size);
1166 	header->cb_header->length = size;
1167 	header->cb_context = SVGA_CB_CONTEXT_DEVICE;
1168 	spin_lock(&man->lock);
1169 	status = vmw_cmdbuf_header_submit(header);
1170 	spin_unlock(&man->lock);
1171 	vmw_cmdbuf_header_free(header);
1172 
1173 	if (status != SVGA_CB_STATUS_COMPLETED) {
1174 		DRM_ERROR("Device context command failed with status %d\n",
1175 			  status);
1176 		return -EINVAL;
1177 	}
1178 
1179 	return 0;
1180 }
1181 
1182 /**
1183  * vmw_cmdbuf_preempt - Send a preempt command through the device
1184  * context.
1185  *
1186  * @man: The command buffer manager.
 * @context: Device context to preempt.
1187  *
1188  * Synchronously sends a preempt command.
1189  */
1190 static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context)
1191 {
1192 	struct {
1193 		uint32 id;
1194 		SVGADCCmdPreempt body;
1195 	} __packed cmd;
1196 
1197 	cmd.id = SVGA_DC_CMD_PREEMPT;
1198 	cmd.body.context = SVGA_CB_CONTEXT_0 + context;
1199 	cmd.body.ignoreIDZero = 0;
1200 
1201 	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
1202 }
1203 
1204 
1205 /**
1206  * vmw_cmdbuf_startstop - Send a start / stop command through the device
1207  * context.
1208  *
1209  * @man: The command buffer manager.
 * @context: Device context to start or stop.
1210  * @enable: Whether to enable or disable the context.
1211  *
1212  * Synchronously sends a device start / stop context command.
1213  */
1214 static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
1215 				bool enable)
1216 {
1217 	struct {
1218 		uint32 id;
1219 		SVGADCCmdStartStop body;
1220 	} __packed cmd;
1221 
1222 	cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
1223 	cmd.body.enable = (enable) ? 1 : 0;
1224 	cmd.body.context = SVGA_CB_CONTEXT_0 + context;
1225 
1226 	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
1227 }
1228 
1229 /**
1230  * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
1231  *
1232  * @man: The command buffer manager.
1233  * @size: The size of the main space pool.
1234  * @default_size: The default size of the command buffer for small kernel
1235  * submissions.
1236  *
1237  * Set the size and allocate the main command buffer space pool,
1238  * as well as the default size of the command buffer for
1239  * small kernel submissions. If successful, this enables large command
1240  * submissions. Note that this function requires that rudimentary command
1241  * submission is already available and that the MOB memory manager is alive.
1242  * Returns 0 on success. Negative error code on failure.
1243  */
1244 int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
1245 			     size_t size, size_t default_size)
1246 {
1247 	struct vmw_private *dev_priv = man->dev_priv;
1248 	bool dummy;
1249 	int ret;
1250 
1251 	if (man->has_pool)
1252 		return -EINVAL;
1253 
1254 	/* First, try to allocate a huge chunk of DMA memory */
1255 	size = PAGE_ALIGN(size);
1256 #ifdef __NetBSD__
1257 	int error, nseg, alloced = 0,  mapped = 0, loaded = 0;
1258 
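	/*
	 * Build the coherent DMA area step by step: create a map, allocate
	 * and map a single segment, then load it into the map.  On failure,
	 * tear down only the steps that completed.
	 */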
1259 	do {
1260 		error = bus_dmamap_create(dev_priv->dev->dmat, size, 1, size,
1261 		    0, BUS_DMA_ALLOCNOW|BUS_DMA_WAITOK, &man->dmamap);
1262 		if (error)
1263 			break;
1264 		error = bus_dmamem_alloc(dev_priv->dev->dmat, size, 1, 0,
1265 		    &man->dmaseg, 1, &nseg, BUS_DMA_WAITOK);
1266 		if (error)
1267 			break;
1268 		KASSERT(nseg == 1);
1269 		alloced = 1;
1270 		error = bus_dmamem_map(dev_priv->dev->dmat, &man->dmaseg, 1,
1271 		    size, (void *)&man->map, BUS_DMA_COHERENT|BUS_DMA_WAITOK);
1272 		if (error)
1273 			break;
1274 		mapped = 1;
1275 		error = bus_dmamap_load(dev_priv->dev->dmat, man->dmamap,
1276 		    man->map, size, NULL, BUS_DMA_WAITOK);
1277 		if (error)
1278 			break;
1279 		loaded = 1;
1280 	} while (0);
1281 	if (error) {
1282 		if (loaded)
1283 			bus_dmamap_unload(dev_priv->dev->dmat, man->dmamap);
1284 		if (mapped)
1285 			bus_dmamem_unmap(dev_priv->dev->dmat, man->map, size);
1286 		if (alloced)
1287 			bus_dmamem_free(dev_priv->dev->dmat, &man->dmaseg, 1);
1288 		if (man->dmamap)
1289 			bus_dmamap_destroy(dev_priv->dev->dmat, man->dmamap);
1290 		man->map = NULL;
1291 	}
1292 #else
1293 	man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
1294 				      &man->handle, GFP_KERNEL);
1295 #endif
1296 	if (man->map) {
1297 		man->using_mob = false;
1298 	} else {
1299 		/*
1300 		 * DMA memory failed. If we can have command buffers in a
1301 		 * MOB, try to use that instead. Note that this will
1302 		 * actually call into the already enabled manager, when
1303 		 * binding the MOB.
1304 		 */
1305 		if (!(dev_priv->capabilities & SVGA_CAP_DX))
1306 			return -ENOMEM;
1307 
1308 		ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
1309 				    &vmw_mob_ne_placement, 0, false,
1310 				    &man->cmd_space);
1311 		if (ret)
1312 			return ret;
1313 
1314 		man->using_mob = true;
1315 		ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
1316 				  &man->map_obj);
1317 		if (ret)
1318 			goto out_no_map;
1319 
1320 		man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
1321 	}
1322 
1323 	man->size = size;
1324 	drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);
1325 
1326 	man->has_pool = true;
1327 
1328 	/*
1329 	 * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
1330 	 * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
1331 	 * needs to wait for space and we block on further command
1332 	 * submissions to be able to free up space.
1333 	 */
1334 	man->default_size = VMW_CMDBUF_INLINE_SIZE;
1335 	DRM_INFO("Using command buffers with %s pool.\n",
1336 		 (man->using_mob) ? "MOB" : "DMA");
1337 
1338 	return 0;
1339 
1340 out_no_map:
1341 	if (man->using_mob) {
1342 		ttm_bo_put(man->cmd_space);
1343 		man->cmd_space = NULL;
1344 	}
1345 
1346 	return ret;
1347 }
1348 
1349 /**
1350  * vmw_cmdbuf_man_create: Create a command buffer manager and enable it for
1351  * inline command buffer submissions only.
1352  *
1353  * @dev_priv: Pointer to device private structure.
1354  *
1355  * Returns a pointer to a command buffer manager on success or an error pointer
1356  * on failure. The command buffer manager will be enabled for submissions of
1357  * size VMW_CMDBUF_INLINE_SIZE only.
1358  */
1359 struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
1360 {
1361 	struct vmw_cmdbuf_man *man;
1362 	struct vmw_cmdbuf_context *ctx;
1363 	unsigned int i;
1364 	int ret;
1365 
1366 	if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
1367 		return ERR_PTR(-ENOSYS);
1368 
1369 	man = kzalloc(sizeof(*man), GFP_KERNEL);
1370 	if (!man)
1371 		return ERR_PTR(-ENOMEM);
1372 
1373 	man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
1374 		2 : 1;
1375 	man->headers = dma_pool_create("vmwgfx cmdbuf",
1376 #ifdef __NetBSD__
1377 				       dev_priv->dev->dmat,
1378 #else
1379 				       &dev_priv->dev->pdev->dev,
1380 #endif
1381 				       sizeof(SVGACBHeader),
1382 				       64, PAGE_SIZE);
1383 	if (!man->headers) {
1384 		ret = -ENOMEM;
1385 		goto out_no_pool;
1386 	}
1387 
1388 	man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
1389 #ifdef __NetBSD__
1390 					dev_priv->dev->dmat,
1391 #else
1392 					&dev_priv->dev->pdev->dev,
1393 #endif
1394 					sizeof(struct vmw_cmdbuf_dheader),
1395 					64, PAGE_SIZE);
1396 	if (!man->dheaders) {
1397 		ret = -ENOMEM;
1398 		goto out_no_dpool;
1399 	}
1400 
1401 	for_each_cmdbuf_ctx(man, i, ctx)
1402 		vmw_cmdbuf_ctx_init(ctx);
1403 
1404 	INIT_LIST_HEAD(&man->error);
1405 	spin_lock_init(&man->lock);
1406 	mutex_init(&man->cur_mutex);
1407 	mutex_init(&man->space_mutex);
1408 	mutex_init(&man->error_mutex);
1409 	man->default_size = VMW_CMDBUF_INLINE_SIZE;
1410 	DRM_INIT_WAITQUEUE(&man->alloc_queue, "vmwgfxaq");
1411 	DRM_INIT_WAITQUEUE(&man->idle_queue, "vmwgfxiq");
1412 	man->dev_priv = dev_priv;
1413 	man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
1414 	INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
1415 	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
1416 			       &dev_priv->error_waiters);
1417 	ret = vmw_cmdbuf_startstop(man, 0, true);
1418 	if (ret) {
1419 		DRM_ERROR("Failed starting command buffer contexts\n");
1420 		vmw_cmdbuf_man_destroy(man);
1421 		return ERR_PTR(ret);
1422 	}
1423 
1424 	return man;
1425 
1426 out_no_dpool:
1427 	dma_pool_destroy(man->headers);
1428 out_no_pool:
1429 	kfree(man);
1430 
1431 	return ERR_PTR(ret);
1432 }
1433 
1434 /**
1435  * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
1436  *
1437  * @man: Pointer to a command buffer manager.
1438  *
1439  * This function removes the main buffer space pool, and should be called
1440  * before MOB memory management is removed. When this function has been called,
1441  * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
1442  * less are allowed, and the default size of the command buffer for small kernel
1443  * submissions is also set to this size.
1444  */
1445 void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
1446 {
1447 	if (!man->has_pool)
1448 		return;
1449 
1450 	man->has_pool = false;
1451 	man->default_size = VMW_CMDBUF_INLINE_SIZE;
1452 	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
1453 	if (man->using_mob) {
1454 		(void) ttm_bo_kunmap(&man->map_obj);
1455 		ttm_bo_put(man->cmd_space);
1456 		man->cmd_space = NULL;
1457 	} else {
1458 #ifdef __NetBSD__
1459 		const bus_dma_tag_t dmat = man->dev_priv->dev->dmat;
1460 		bus_dmamap_unload(dmat, man->dmamap);
1461 		bus_dmamem_unmap(dmat, man->map, man->size);
1462 		bus_dmamem_free(dmat, &man->dmaseg, 1);
1463 		bus_dmamap_destroy(dmat, man->dmamap);
1464 #else
1465 		dma_free_coherent(&man->dev_priv->dev->pdev->dev,
1466 				  man->size, man->map, man->handle);
1467 #endif
1468 	}
1469 }
1470 
1471 /**
1472  * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
1473  *
1474  * @man: Pointer to a command buffer manager.
1475  *
1476  * This function idles and then destroys a command buffer manager.
1477  */
1478 void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
1479 {
1480 	WARN_ON_ONCE(man->has_pool);
1481 	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
1482 
1483 	if (vmw_cmdbuf_startstop(man, 0, false))
1484 		DRM_ERROR("Failed stopping command buffer contexts.\n");
1485 
1486 	vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
1487 				  &man->dev_priv->error_waiters);
1488 	(void) cancel_work_sync(&man->work);
1489 	dma_pool_destroy(man->dheaders);
1490 	dma_pool_destroy(man->headers);
1491 	DRM_DESTROY_WAITQUEUE(&man->idle_queue);
1492 	DRM_DESTROY_WAITQUEUE(&man->alloc_queue);
1493 	mutex_destroy(&man->cur_mutex);
1494 	mutex_destroy(&man->space_mutex);
1495 	mutex_destroy(&man->error_mutex);
1496 	spin_lock_destroy(&man->lock);
1497 	kfree(man);
1498 }
1499