 /***************************************************************************\
|*                                                                           *|
|*       Copyright 1993-2003 NVIDIA, Corporation.  All rights reserved.      *|
|*                                                                           *|
|*     NOTICE TO USER:   The source code  is copyrighted under  U.S. and     *|
|*     international laws.  Users and possessors of this source code are     *|
|*     hereby granted a nonexclusive,  royalty-free copyright license to     *|
|*     use this code in individual and commercial software.                  *|
|*                                                                           *|
|*     Any use of this source code must include,  in the user documenta-     *|
|*     tion and  internal comments to the code,  notices to the end user     *|
|*     as follows:                                                           *|
|*                                                                           *|
|*       Copyright 1993-2003 NVIDIA, Corporation.  All rights reserved.      *|
|*                                                                           *|
|*     NVIDIA, CORPORATION MAKES NO REPRESENTATION ABOUT THE SUITABILITY     *|
|*     OF  THIS SOURCE  CODE  FOR ANY PURPOSE.  IT IS  PROVIDED  "AS IS"     *|
|*     WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.  NVIDIA, CORPOR-     *|
|*     ATION DISCLAIMS ALL WARRANTIES  WITH REGARD  TO THIS SOURCE CODE,     *|
|*     INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, NONINFRINGE-     *|
|*     MENT,  AND FITNESS  FOR A PARTICULAR PURPOSE.   IN NO EVENT SHALL     *|
|*     NVIDIA, CORPORATION  BE LIABLE FOR ANY SPECIAL,  INDIRECT,  INCI-     *|
|*     DENTAL, OR CONSEQUENTIAL DAMAGES,  OR ANY DAMAGES  WHATSOEVER RE-     *|
|*     SULTING FROM LOSS OF USE,  DATA OR PROFITS,  WHETHER IN AN ACTION     *|
|*     OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,  ARISING OUT OF     *|
|*     OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOURCE CODE.     *|
|*                                                                           *|
|*     U.S. Government  End  Users.   This source code  is a "commercial     *|
|*     item,"  as that  term is  defined at  48 C.F.R. 2.101 (OCT 1995),     *|
|*     consisting  of "commercial  computer  software"  and  "commercial     *|
|*     computer  software  documentation,"  as such  terms  are  used in     *|
|*     48 C.F.R. 12.212 (SEPT 1995)  and is provided to the U.S. Govern-     *|
|*     ment only as  a commercial end item.   Consistent with  48 C.F.R.     *|
|*     12.212 and  48 C.F.R. 227.7202-1 through  227.7202-4 (JUNE 1995),     *|
|*     all U.S. Government End Users  acquire the source code  with only     *|
|*     those rights set forth herein.                                        *|
|*                                                                           *|
 \***************************************************************************/

/*
 * GPL Licensing Note - According to Mark Vojkovich, author of the Xorg/
 * XFree86 'nv' driver, this source code is provided under MIT-style licensing
 * where the source code is provided "as is" without warranty of any kind.
 * The only usage restriction is for the copyright notices to be retained
 * whenever code is used.
 *
 * Antonino Daplas <adaplas@pol.net> 2005-03-11
 */

#include <linux/fb.h>
#include <linux/nmi.h>

#include "nv_type.h"
#include "nv_proto.h"
#include "nv_dma.h"
#include "nv_local.h"

/* There is a HW race condition with videoram command buffers.
   You can't jump to the location of your put offset.  We write put
   at the jump offset + SKIPS dwords with noop padding in between
   to solve this problem */
#define SKIPS  8

static const int NVCopyROP[16] = {
	0xCC,			/* copy   */
	0x55			/* invert */
};

static const int NVCopyROP_PM[16] = {
	0xCA,			/* copy   */
	0x5A,			/* invert */
};

static inline void nvidiafb_safe_mode(struct fb_info *info)
{
	struct nvidia_par *par = info->par;

	touch_softlockup_watchdog();
	info->pixmap.scan_align = 1;
	par->lockup = 1;
}

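/*
 * Wait for the DMA engine to consume everything we have queued: spin until
 * the hardware GET pointer catches up with our last PUT.  On timeout, assume
 * the engine locked up and drop to unaccelerated mode.
 */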
static inline void NVFlush(struct fb_info *info)
{
	struct nvidia_par *par = info->par;
	int count = 1000000000;

	while (--count && READ_GET(par) != par->dmaPut) ;

	if (!count) {
		printk("nvidiafb: DMA Flush lockup\n");
		nvidiafb_safe_mode(info);
	}
}

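/*
 * Wait for the graphics engine itself to go idle (PGRAPH status reads zero),
 * again falling back to unaccelerated mode on timeout.
 */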
static inline void NVSync(struct fb_info *info)
{
	struct nvidia_par *par = info->par;
	int count = 1000000000;

	while (--count && NV_RD32(par->PGRAPH, 0x0700)) ;

	if (!count) {
		printk("nvidiafb: DMA Sync lockup\n");
		nvidiafb_safe_mode(info);
	}
}

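/* Publish any commands queued since the last kickoff by advancing PUT. */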
static void NVDmaKickoff(struct nvidia_par *par)
{
	if (par->dmaCurrent != par->dmaPut) {
		par->dmaPut = par->dmaCurrent;
		WRITE_PUT(par, par->dmaPut);
	}
}

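/*
 * Make sure at least 'size' dwords are free in the command buffer.  When the
 * end of the buffer is reached, a jump command (0x20000000) sends the
 * hardware back to the start, skipping the first SKIPS noop dwords as
 * described above, and the PUT/CURRENT pointers are rewound accordingly.
 */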
static void NVDmaWait(struct fb_info *info, int size)
{
	struct nvidia_par *par = info->par;
	int dmaGet;
	int count = 1000000000, cnt;
	size++;

	while (par->dmaFree < size && --count && !par->lockup) {
		dmaGet = READ_GET(par);

		if (par->dmaPut >= dmaGet) {
			par->dmaFree = par->dmaMax - par->dmaCurrent;
			if (par->dmaFree < size) {
				NVDmaNext(par, 0x20000000);
				if (dmaGet <= SKIPS) {
					if (par->dmaPut <= SKIPS)
						WRITE_PUT(par, SKIPS + 1);
					cnt = 1000000000;
					do {
						dmaGet = READ_GET(par);
					} while (--cnt && dmaGet <= SKIPS);
					if (!cnt) {
						printk("DMA Get lockup\n");
						par->lockup = 1;
					}
				}
				WRITE_PUT(par, SKIPS);
				par->dmaCurrent = par->dmaPut = SKIPS;
				par->dmaFree = dmaGet - (SKIPS + 1);
			}
		} else
			par->dmaFree = dmaGet - par->dmaCurrent - 1;
	}

	if (!count) {
		printk("nvidiafb: DMA Wait Lockup\n");
		nvidiafb_safe_mode(info);
	}
}

static void NVSetPattern(struct fb_info *info, u32 clr0, u32 clr1,
			 u32 pat0, u32 pat1)
{
	struct nvidia_par *par = info->par;

	NVDmaStart(info, par, PATTERN_COLOR_0, 4);
	NVDmaNext(par, clr0);
	NVDmaNext(par, clr1);
	NVDmaNext(par, pat0);
	NVDmaNext(par, pat1);
}

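/*
 * Select the raster operation.  With a planemask other than ~0 the pattern
 * is loaded with the mask and the pattern-masked (_PM) ROP variants are
 * used; currentRop caches the last value programmed so redundant method
 * writes are avoided.
 */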
static void NVSetRopSolid(struct fb_info *info, u32 rop, u32 planemask)
{
	struct nvidia_par *par = info->par;

	if (planemask != ~0) {
		NVSetPattern(info, 0, planemask, ~0, ~0);
		if (par->currentRop != (rop + 32)) {
			NVDmaStart(info, par, ROP_SET, 1);
			NVDmaNext(par, NVCopyROP_PM[rop]);
			par->currentRop = rop + 32;
		}
	} else if (par->currentRop != rop) {
		if (par->currentRop >= 16)
			NVSetPattern(info, ~0, ~0, ~0, ~0);
		NVDmaStart(info, par, ROP_SET, 1);
		NVDmaNext(par, NVCopyROP[rop]);
		par->currentRop = rop;
	}
}

static void NVSetClippingRectangle(struct fb_info *info, int x1, int y1,
				   int x2, int y2)
{
	struct nvidia_par *par = info->par;
	int h = y2 - y1 + 1;
	int w = x2 - x1 + 1;

	NVDmaStart(info, par, CLIP_POINT, 2);
	NVDmaNext(par, (y1 << 16) | x1);
	NVDmaNext(par, (h << 16) | w);
}

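/*
 * (Re)initialize the acceleration engine: place the DMA command buffer at
 * the end of the usable framebuffer memory, bind the drawing objects to
 * their subchannels, program the surface/pattern/rect/line formats to match
 * the current mode, and reset the ROP and clipping state.
 */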
void NVResetGraphics(struct fb_info *info)
{
	struct nvidia_par *par = info->par;
	u32 surfaceFormat, patternFormat, rectFormat, lineFormat;
	int pitch, i;

	pitch = info->fix.line_length;

	par->dmaBase = (u32 __iomem *) (&par->FbStart[par->FbUsableSize]);

	for (i = 0; i < SKIPS; i++)
		NV_WR32(&par->dmaBase[i], 0, 0x00000000);

	NV_WR32(&par->dmaBase[0x0 + SKIPS], 0, 0x00040000);
	NV_WR32(&par->dmaBase[0x1 + SKIPS], 0, 0x80000010);
	NV_WR32(&par->dmaBase[0x2 + SKIPS], 0, 0x00042000);
	NV_WR32(&par->dmaBase[0x3 + SKIPS], 0, 0x80000011);
	NV_WR32(&par->dmaBase[0x4 + SKIPS], 0, 0x00044000);
	NV_WR32(&par->dmaBase[0x5 + SKIPS], 0, 0x80000012);
	NV_WR32(&par->dmaBase[0x6 + SKIPS], 0, 0x00046000);
	NV_WR32(&par->dmaBase[0x7 + SKIPS], 0, 0x80000013);
	NV_WR32(&par->dmaBase[0x8 + SKIPS], 0, 0x00048000);
	NV_WR32(&par->dmaBase[0x9 + SKIPS], 0, 0x80000014);
	NV_WR32(&par->dmaBase[0xA + SKIPS], 0, 0x0004A000);
	NV_WR32(&par->dmaBase[0xB + SKIPS], 0, 0x80000015);
	NV_WR32(&par->dmaBase[0xC + SKIPS], 0, 0x0004C000);
	NV_WR32(&par->dmaBase[0xD + SKIPS], 0, 0x80000016);
	NV_WR32(&par->dmaBase[0xE + SKIPS], 0, 0x0004E000);
	NV_WR32(&par->dmaBase[0xF + SKIPS], 0, 0x80000017);

	par->dmaPut = 0;
	par->dmaCurrent = 16 + SKIPS;
	par->dmaMax = 8191;
	par->dmaFree = par->dmaMax - par->dmaCurrent;

	switch (info->var.bits_per_pixel) {
	case 32:
	case 24:
		surfaceFormat = SURFACE_FORMAT_DEPTH24;
		patternFormat = PATTERN_FORMAT_DEPTH24;
		rectFormat = RECT_FORMAT_DEPTH24;
		lineFormat = LINE_FORMAT_DEPTH24;
		break;
	case 16:
		surfaceFormat = SURFACE_FORMAT_DEPTH16;
		patternFormat = PATTERN_FORMAT_DEPTH16;
		rectFormat = RECT_FORMAT_DEPTH16;
		lineFormat = LINE_FORMAT_DEPTH16;
		break;
	default:
		surfaceFormat = SURFACE_FORMAT_DEPTH8;
		patternFormat = PATTERN_FORMAT_DEPTH8;
		rectFormat = RECT_FORMAT_DEPTH8;
		lineFormat = LINE_FORMAT_DEPTH8;
		break;
	}

	NVDmaStart(info, par, SURFACE_FORMAT, 4);
	NVDmaNext(par, surfaceFormat);
	NVDmaNext(par, pitch | (pitch << 16));
	NVDmaNext(par, 0);
	NVDmaNext(par, 0);

	NVDmaStart(info, par, PATTERN_FORMAT, 1);
	NVDmaNext(par, patternFormat);

	NVDmaStart(info, par, RECT_FORMAT, 1);
	NVDmaNext(par, rectFormat);

	NVDmaStart(info, par, LINE_FORMAT, 1);
	NVDmaNext(par, lineFormat);

	par->currentRop = ~0;	/* set to something invalid */
	NVSetRopSolid(info, ROP_COPY, ~0);

	NVSetClippingRectangle(info, 0, 0, info->var.xres_virtual,
			       info->var.yres_virtual);

	NVDmaKickoff(par);
}

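/*
 * fbdev sync hook: drain the command buffer and wait for the engine to go
 * idle unless a lockup has already been detected.
 */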
int nvidiafb_sync(struct fb_info *info)
{
	struct nvidia_par *par = info->par;

	if (info->state != FBINFO_STATE_RUNNING)
		return 0;

	if (!par->lockup)
		NVFlush(info);

	if (!par->lockup)
		NVSync(info);

	return 0;
}

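/*
 * Accelerated screen-to-screen copy using the BLIT object; falls back to
 * cfb_copyarea() when the engine has locked up.
 */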
void nvidiafb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	struct nvidia_par *par = info->par;

	if (info->state != FBINFO_STATE_RUNNING)
		return;

	if (par->lockup) {
		cfb_copyarea(info, region);
		return;
	}

	NVDmaStart(info, par, BLIT_POINT_SRC, 3);
	NVDmaNext(par, (region->sy << 16) | region->sx);
	NVDmaNext(par, (region->dy << 16) | region->dx);
	NVDmaNext(par, (region->height << 16) | region->width);

	NVDmaKickoff(par);
}

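/*
 * Accelerated solid fill.  The color index is looked up in the pseudo
 * palette for depths above 8 bpp, and a non-copy ROP is programmed only for
 * the duration of the fill; falls back to cfb_fillrect() on lockup.
 */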
void nvidiafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	struct nvidia_par *par = info->par;
	u32 color;

	if (info->state != FBINFO_STATE_RUNNING)
		return;

	if (par->lockup) {
		cfb_fillrect(info, rect);
		return;
	}

	if (info->var.bits_per_pixel == 8)
		color = rect->color;
	else
		color = ((u32 *) info->pseudo_palette)[rect->color];

	if (rect->rop != ROP_COPY)
		NVSetRopSolid(info, rect->rop, ~0);

	NVDmaStart(info, par, RECT_SOLID_COLOR, 1);
	NVDmaNext(par, color);

	NVDmaStart(info, par, RECT_SOLID_RECTS(0), 2);
	NVDmaNext(par, (rect->dx << 16) | rect->dy);
	NVDmaNext(par, (rect->width << 16) | rect->height);

	NVDmaKickoff(par);

	if (rect->rop != ROP_COPY)
		NVSetRopSolid(info, ROP_COPY, ~0);
}

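/*
 * Expand a 1 bpp image to the framebuffer with the two-color rectangle
 * expansion object: the clip/bounding box and fg/bg colors are sent first,
 * then the bitmap is streamed as dwords (each passed through reverse_order()
 * to put the bits in the order the hardware expects) in chunks of at most
 * RECT_EXPAND_TWO_COLOR_DATA_MAX_DWORDS.
 */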
static void nvidiafb_mono_color_expand(struct fb_info *info,
				       const struct fb_image *image)
{
	struct nvidia_par *par = info->par;
	u32 fg, bg, mask = ~(~0 >> (32 - info->var.bits_per_pixel));
	u32 dsize, width, *data = (u32 *) image->data, tmp;
	int j, k = 0;

	width = (image->width + 31) & ~31;
	dsize = (width * image->height) >> 5;

	if (info->var.bits_per_pixel == 8) {
		fg = image->fg_color | mask;
		bg = image->bg_color | mask;
	} else {
		fg = ((u32 *) info->pseudo_palette)[image->fg_color] | mask;
		bg = ((u32 *) info->pseudo_palette)[image->bg_color] | mask;
	}

	NVDmaStart(info, par, RECT_EXPAND_TWO_COLOR_CLIP, 7);
	NVDmaNext(par, (image->dy << 16) | (image->dx & 0xffff));
	NVDmaNext(par, ((image->dy + image->height) << 16) |
		  ((image->dx + image->width) & 0xffff));
	NVDmaNext(par, bg);
	NVDmaNext(par, fg);
	NVDmaNext(par, (image->height << 16) | width);
	NVDmaNext(par, (image->height << 16) | width);
	NVDmaNext(par, (image->dy << 16) | (image->dx & 0xffff));

	while (dsize >= RECT_EXPAND_TWO_COLOR_DATA_MAX_DWORDS) {
		NVDmaStart(info, par, RECT_EXPAND_TWO_COLOR_DATA(0),
			   RECT_EXPAND_TWO_COLOR_DATA_MAX_DWORDS);

		for (j = RECT_EXPAND_TWO_COLOR_DATA_MAX_DWORDS; j--;) {
			tmp = data[k++];
			reverse_order(&tmp);
			NVDmaNext(par, tmp);
		}

		dsize -= RECT_EXPAND_TWO_COLOR_DATA_MAX_DWORDS;
	}

	if (dsize) {
		NVDmaStart(info, par, RECT_EXPAND_TWO_COLOR_DATA(0), dsize);

		for (j = dsize; j--;) {
			tmp = data[k++];
			reverse_order(&tmp);
			NVDmaNext(par, tmp);
		}
	}

	NVDmaKickoff(par);
}

void nvidiafb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	struct nvidia_par *par = info->par;

	if (info->state != FBINFO_STATE_RUNNING)
		return;

	if (image->depth == 1 && !par->lockup)
		nvidiafb_mono_color_expand(info, image);
	else
		cfb_imageblit(info, image);
}