xref: /linux/drivers/gpu/drm/drm_flip_work.c (revision 78dfe8a0)
1 /*
2  * Copyright (C) 2013 Red Hat
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21  * SOFTWARE.
22  */
23 
24 #include <linux/slab.h>
25 
26 #include <drm/drm_flip_work.h>
27 #include <drm/drm_print.h>
28 #include <drm/drm_util.h>
29 
/*
 * struct drm_flip_task - one queued unit of flip work
 * @node: list node; linked into drm_flip_work's queued list, then moved
 *        to the commited list by drm_flip_work_commit()
 * @data: opaque value handed back to the drm_flip_func_t callback
 */
struct drm_flip_task {
	struct list_head node;
	void *data;
};
34 
/* Allocate a task wrapping @data; returns NULL if allocation fails. */
static struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags)
{
	struct drm_flip_task *task = kzalloc(sizeof(*task), flags);

	if (!task)
		return NULL;

	task->data = data;
	return task;
}
45 
drm_flip_work_queue_task(struct drm_flip_work * work,struct drm_flip_task * task)46 static void drm_flip_work_queue_task(struct drm_flip_work *work, struct drm_flip_task *task)
47 {
48 	unsigned long flags;
49 
50 	spin_lock_irqsave(&work->lock, flags);
51 	list_add_tail(&task->node, &work->queued);
52 	spin_unlock_irqrestore(&work->lock, flags);
53 }
54 
55 /**
56  * drm_flip_work_queue - queue work
57  * @work: the flip-work
58  * @val: the value to queue
59  *
60  * Queues work, that will later be run (passed back to drm_flip_func_t
61  * func) on a work queue after drm_flip_work_commit() is called.
62  */
drm_flip_work_queue(struct drm_flip_work * work,void * val)63 void drm_flip_work_queue(struct drm_flip_work *work, void *val)
64 {
65 	struct drm_flip_task *task;
66 
67 	task = drm_flip_work_allocate_task(val,
68 				drm_can_sleep() ? GFP_KERNEL : GFP_ATOMIC);
69 	if (task) {
70 		drm_flip_work_queue_task(work, task);
71 	} else {
72 		DRM_ERROR("%s could not allocate task!\n", work->name);
73 		work->func(work, val);
74 	}
75 }
76 EXPORT_SYMBOL(drm_flip_work_queue);
77 
78 /**
79  * drm_flip_work_commit - commit queued work
80  * @work: the flip-work
81  * @wq: the work-queue to run the queued work on
82  *
83  * Trigger work previously queued by drm_flip_work_queue() to run
84  * on a workqueue.  The typical usage would be to queue work (via
85  * drm_flip_work_queue()) at any point (from vblank irq and/or
86  * prior), and then from vblank irq commit the queued work.
87  */
drm_flip_work_commit(struct drm_flip_work * work,struct workqueue_struct * wq)88 void drm_flip_work_commit(struct drm_flip_work *work,
89 		struct workqueue_struct *wq)
90 {
91 	unsigned long flags;
92 
93 	spin_lock_irqsave(&work->lock, flags);
94 	list_splice_tail(&work->queued, &work->commited);
95 	INIT_LIST_HEAD(&work->queued);
96 	spin_unlock_irqrestore(&work->lock, flags);
97 	queue_work(wq, &work->worker);
98 }
99 EXPORT_SYMBOL(drm_flip_work_commit);
100 
flip_worker(struct work_struct * w)101 static void flip_worker(struct work_struct *w)
102 {
103 	struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker);
104 	struct list_head tasks;
105 	unsigned long flags;
106 
107 	while (1) {
108 		struct drm_flip_task *task, *tmp;
109 
110 		INIT_LIST_HEAD(&tasks);
111 		spin_lock_irqsave(&work->lock, flags);
112 		list_splice_tail(&work->commited, &tasks);
113 		INIT_LIST_HEAD(&work->commited);
114 		spin_unlock_irqrestore(&work->lock, flags);
115 
116 		if (list_empty(&tasks))
117 			break;
118 
119 		list_for_each_entry_safe(task, tmp, &tasks, node) {
120 			work->func(work, task->data);
121 			kfree(task);
122 		}
123 	}
124 }
125 
126 /**
127  * drm_flip_work_init - initialize flip-work
128  * @work: the flip-work to initialize
129  * @name: debug name
130  * @func: the callback work function
131  *
132  * Initializes/allocates resources for the flip-work
133  */
drm_flip_work_init(struct drm_flip_work * work,const char * name,drm_flip_func_t func)134 void drm_flip_work_init(struct drm_flip_work *work,
135 		const char *name, drm_flip_func_t func)
136 {
137 	work->name = name;
138 	INIT_LIST_HEAD(&work->queued);
139 	INIT_LIST_HEAD(&work->commited);
140 	spin_lock_init(&work->lock);
141 	work->func = func;
142 
143 	INIT_WORK(&work->worker, flip_worker);
144 }
145 EXPORT_SYMBOL(drm_flip_work_init);
146 
/**
 * drm_flip_work_cleanup - cleans up flip-work
 * @work: the flip-work to cleanup
 *
 * Destroy resources allocated for the flip-work
 */
void drm_flip_work_cleanup(struct drm_flip_work *work)
{
	/*
	 * Nothing is dynamically owned by the drm_flip_work itself; just
	 * warn if tasks are still pending, since any entries left on the
	 * queued/commited lists at this point would be leaked.
	 */
	WARN_ON(!list_empty(&work->queued) || !list_empty(&work->commited));
}
EXPORT_SYMBOL(drm_flip_work_cleanup);
158