/*	$NetBSD: vbva_base.c,v 1.2 2021/12/18 23:45:44 riastradh Exp $	*/

// SPDX-License-Identifier: MIT
/* Copyright (C) 2006-2017 Oracle Corporation */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vbva_base.c,v 1.2 2021/12/18 23:45:44 riastradh Exp $");

#include <linux/vbox_err.h>
#include "vbox_drv.h"
#include "vboxvideo_guest.h"
#include "hgsmi_channels.h"

/*
 * There is a hardware ring buffer in the graphics device video RAM, formerly
 * in the VBox VMMDev PCI memory space.
 * All graphics commands go there, serialized by vbva_buffer_begin_update
 * and vbva_buffer_end_update.
 *
 * free_offset is the writing position and data_offset is the reading
 * position; free_offset == data_offset means the buffer is empty.
 * There must always be a gap between data_offset and free_offset while
 * data are in the buffer.
 * The guest only changes free_offset; the host changes data_offset.
 */
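
/*
 * Illustrative call sequence (a minimal sketch, not part of this file;
 * "vbva_ctx", "ctx", "cmd" and "cmd_len" are hypothetical, and whatever
 * locking the caller needs is omitted):
 *
 *	if (vbva_buffer_begin_update(vbva_ctx, ctx)) {
 *		vbva_write(vbva_ctx, ctx, cmd, cmd_len);
 *		vbva_buffer_end_update(vbva_ctx);
 *	}
 */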

static u32 vbva_buffer_available(const struct vbva_buffer *vbva)
{
	s32 diff = vbva->data_offset - vbva->free_offset;

	return diff > 0 ? diff : vbva->data_len + diff;
}
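
/*
 * The ring arithmetic above, with illustrative values: data_len == 4096,
 * data_offset == 100 and free_offset == 200 give diff == -100, so
 * 4096 - 100 == 3996 bytes are available from the write position forward
 * (wrapping past the end of the buffer) to the read position.
 */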

static void vbva_buffer_place_data_at(struct vbva_buf_ctx *vbva_ctx,
				      const void *p, u32 len, u32 offset)
{
	struct vbva_buffer *vbva = vbva_ctx->vbva;
	u32 bytes_till_boundary = vbva->data_len - offset;
	u8 *dst = &vbva->data[offset];
	s32 diff = len - bytes_till_boundary;

	if (diff <= 0) {
		/* Chunk will not cross buffer boundary. */
		memcpy(dst, p, len);
	} else {
		/* Chunk crosses buffer boundary. */
		memcpy(dst, p, bytes_till_boundary);
		memcpy(&vbva->data[0], (u8 *)p + bytes_till_boundary, diff);
	}
}
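
/*
 * Wrap-around example for the copy above (illustrative values): with
 * data_len == 4096, offset == 4000 and len == 200, bytes_till_boundary
 * is 96 and diff is 104, so 96 bytes land at the end of the ring and
 * the remaining 104 bytes continue at data[0].
 */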

static void vbva_buffer_flush(struct gen_pool *ctx)
{
	struct vbva_flush *p;

	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_FLUSH);
	if (!p)
		return;

	p->reserved = 0;

	hgsmi_buffer_submit(ctx, p);
	hgsmi_buffer_free(ctx, p);
}

bool vbva_write(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
		const void *p, u32 len)
{
	struct vbva_record *record;
	struct vbva_buffer *vbva;
	u32 available;

	vbva = vbva_ctx->vbva;
	record = vbva_ctx->record;

	if (!vbva || vbva_ctx->buffer_overflow ||
	    !record || !(record->len_and_flags & VBVA_F_RECORD_PARTIAL))
		return false;

	available = vbva_buffer_available(vbva);

	while (len > 0) {
		u32 chunk = len;

		if (chunk >= available) {
			vbva_buffer_flush(ctx);
			available = vbva_buffer_available(vbva);
		}

		if (chunk >= available) {
			if (WARN_ON(available <= vbva->partial_write_tresh)) {
				vbva_ctx->buffer_overflow = true;
				return false;
			}
			chunk = available - vbva->partial_write_tresh;
		}

		vbva_buffer_place_data_at(vbva_ctx, p, chunk,
					  vbva->free_offset);

		vbva->free_offset = (vbva->free_offset + chunk) %
				    vbva->data_len;
		record->len_and_flags += chunk;
		available -= chunk;
		len -= chunk;
		p += chunk;
	}

	return true;
}
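
/*
 * Partial-write behaviour of vbva_write above, with illustrative values:
 * with 1000 bytes available and partial_write_tresh == 256, a 2000-byte
 * write flushes, stores 744 bytes, and loops; if the host never advances
 * data_offset, the remaining space drops to the threshold and the write
 * fails with buffer_overflow set.
 */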

static bool vbva_inform_host(struct vbva_buf_ctx *vbva_ctx,
			     struct gen_pool *ctx, s32 screen, bool enable)
{
	struct vbva_enable_ex *p;
	bool ret;

	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_ENABLE);
	if (!p)
		return false;

	p->base.flags = enable ? VBVA_F_ENABLE : VBVA_F_DISABLE;
	p->base.offset = vbva_ctx->buffer_offset;
	p->base.result = VERR_NOT_SUPPORTED;
	if (screen >= 0) {
		p->base.flags |= VBVA_F_EXTENDED | VBVA_F_ABSOFFSET;
		p->screen_id = screen;
	}

	hgsmi_buffer_submit(ctx, p);

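	/* VBox status codes: negative on failure, zero or positive on success. */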
	if (enable)
		ret = p->base.result >= 0;
	else
		ret = true;

	hgsmi_buffer_free(ctx, p);

	return ret;
}

bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
		 struct vbva_buffer *vbva, s32 screen)
{
	bool ret = false;

	memset(vbva, 0, sizeof(*vbva));
	vbva->partial_write_tresh = 256;
	vbva->data_len = vbva_ctx->buffer_length - sizeof(struct vbva_buffer);
	vbva_ctx->vbva = vbva;

	ret = vbva_inform_host(vbva_ctx, ctx, screen, true);
	if (!ret)
		vbva_disable(vbva_ctx, ctx, screen);

	return ret;
}

void vbva_disable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
		  s32 screen)
{
	vbva_ctx->buffer_overflow = false;
	vbva_ctx->record = NULL;
	vbva_ctx->vbva = NULL;

	vbva_inform_host(vbva_ctx, ctx, screen, false);
}

bool vbva_buffer_begin_update(struct vbva_buf_ctx *vbva_ctx,
			      struct gen_pool *ctx)
{
	struct vbva_record *record;
	u32 next;

	if (!vbva_ctx->vbva ||
	    !(vbva_ctx->vbva->host_flags.host_events & VBVA_F_MODE_ENABLED))
		return false;

	WARN_ON(vbva_ctx->buffer_overflow || vbva_ctx->record);

	next = (vbva_ctx->vbva->record_free_index + 1) % VBVA_MAX_RECORDS;

	/* Flush if all slots in the records queue are used. */
	if (next == vbva_ctx->vbva->record_first_index)
		vbva_buffer_flush(ctx);

	/* If there is still no free slot after the flush, fail the request. */
	if (next == vbva_ctx->vbva->record_first_index)
		return false;

	record = &vbva_ctx->vbva->records[vbva_ctx->vbva->record_free_index];
	record->len_and_flags = VBVA_F_RECORD_PARTIAL;
	vbva_ctx->vbva->record_free_index = next;
	/* Remember which record we are using. */
	vbva_ctx->record = record;

	return true;
}

void vbva_buffer_end_update(struct vbva_buf_ctx *vbva_ctx)
{
	struct vbva_record *record = vbva_ctx->record;

	WARN_ON(!vbva_ctx->vbva || !record ||
		!(record->len_and_flags & VBVA_F_RECORD_PARTIAL));

	/* Mark the record completed. */
	record->len_and_flags &= ~VBVA_F_RECORD_PARTIAL;

	vbva_ctx->buffer_overflow = false;
	vbva_ctx->record = NULL;
}

void vbva_setup_buffer_context(struct vbva_buf_ctx *vbva_ctx,
			       u32 buffer_offset, u32 buffer_length)
{
	vbva_ctx->buffer_offset = buffer_offset;
	vbva_ctx->buffer_length = buffer_length;
}