/*

Copyright (C) 2015-2018 Night Dive Studios, LLC.

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.

*/
/*
 * $Source: r:/prj/lib/src/2d/RCS/fl8bldbl.c $
 * $Revision: 1.3 $
 * $Author: kevin $
 * $Date: 1994/12/01 14:59:38 $
 *
 * $Log: fl8bldbl.c $
 * Revision 1.3  1994/12/01  14:59:38  kevin
 * Added sub/bitmap blending routines.
 *
 * Revision 1.2  1994/09/08  00:01:07  kevin
 * removed smooth_hv_doubler (replaced by asm version).
 *
 * Revision 1.1  1994/03/14  17:51:09  kevin
 * Initial revision
 *
 */

#include "blncon.h"
#include "blndat.h"
#include "blnfcn.h"
#include "cnvdat.h"
#include "grs.h"
#include "lg.h"
#include <string.h>

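/*
 * Note on the blend tables used below (semantics inferred from how they are
 * indexed here, not from the table generator): grd_half_blend is a 64K
 * lookup keyed by two packed 8-bit palette indices, (a << 8) | b, and
 * appears to return the palette entry closest to a 50/50 mix of a and b.
 * grd_blend, once gr_init_blend(2) has set up four blend levels, seems to
 * hold the intermediate mixes in 1/4 steps, one GR_BLEND_TABLE_SIZE-sized
 * table per step.
 */
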
void flat8_flat8_v_double_ubitmap(grs_bitmap *bm) {
    int i, j, bpv, row, b_row; /* loop controls, bottom pixel value */
    uchar *src = bm->bits, *dst = grd_bm.bits, *src_nxt, *dst_nxt;
    int dst_skip = grd_bm.row - bm->w, src_skip = bm->row - bm->w;
    uchar *local_grd_half_blend;

    local_grd_half_blend = grd_half_blend;
    row = grd_bm.row;
    b_row = bm->row;

    LG_memcpy(dst, src, bm->w);     /* first copy the top line */
    for (i = 0; i < bm->h - 1; i++) /* for each source row, 2 destination rows */
    {
        src_nxt = src + b_row;      /* next line of source */
        dst += row;                 /* interpolated row */
        dst_nxt = dst + row;        /* next clone row */
        for (j = 0; j < bm->w; j++) /* all pixels in vertical clone */
        {
            *dst++ = local_grd_half_blend[((bpv = *src_nxt++) << 8) | (*src++)];
            *dst_nxt++ = bpv; /* is this faster than another memcpy? in asm probably? */
        }
        dst += dst_skip;
        src += src_skip; /* get to the next line */
    }
#ifdef FULL_FILL
    memset(dst + row, 0, bm->w);
#endif
}
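
/*
 * Rough usage sketch (an assumption about the call site, not code from the
 * original renderer): the caller is expected to have made a flat8 canvas
 * current, so that grd_bm is at least as wide as the source and twice as
 * tall, before calling the doubler:
 *
 *   gr_set_canvas(&dbl_canvas);            // hypothetical 2x-height canvas
 *   flat8_flat8_v_double_ubitmap(&src_bm); // writes rows 0 .. 2*src_bm.h-2
 *
 * Source row i lands on destination row 2*i, destination row 2*i+1 gets the
 * 50/50 blend of source rows i and i+1, and with FULL_FILL defined the row
 * after the last written one is cleared to 0.
 */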

#define QSB_SIZE 4
#define LOG_QSB_SIZE 2
#define D_ROW 8 * QSB_SIZE

uchar *grd_last_sub_bm;
uchar grd_sub_bm_buffer[D_ROW * D_ROW];
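/* grd_sub_bm_buffer is a D_ROW x D_ROW (32x32) flat8 scratch bitmap holding
 * the most recently doubled or quadrupled sub-tile; the scaled tile sits in
 * its center, presumably so mappers can sample a little past the tile edge.
 * grd_last_sub_bm remembers the source pointer of that tile so a repeated
 * request for the same tile can return early. */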

void gri_flat8_hv_quadruple_sub_bitmap(grs_bitmap *src_bm, grs_bitmap *dst_bm, int u, int v);
void gri_flat8_hv_quadruple_sub_bitmap(grs_bitmap *src_bm, grs_bitmap *dst_bm, int u, int v) {
    int i, j, full_h_blend;
    uchar *src, *dst;

    if (grd_log_blend_levels != 2) {
        gr_free_blend();
        gr_init_blend(2);
    }

    /* initialize destination bitmap parameters. */
    dst_bm->bits = grd_sub_bm_buffer;
    dst_bm->h = D_ROW;
    dst_bm->row = dst_bm->w = D_ROW;
    dst_bm->hlog = dst_bm->wlog = LOG_QSB_SIZE + 3;

    /* get pointer to sub bitmap bits */
    src = src_bm->bits + u + src_bm->row * v;

    /* If we just did this bitmap, no need to do it again! */
    if (src == grd_last_sub_bm)
        return;
    grd_last_sub_bm = src;

    /* Fill in the middle of the destination bitmap */
    dst = dst_bm->bits + (D_ROW + 1) * (D_ROW / 4);
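    /* (D_ROW + 1) * (D_ROW / 4) is 8 * D_ROW + 8: the quadrupled 16x16 tile
     * starts 8 rows down and 8 pixels in, i.e. centered in the 32x32 buffer. */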

    if (v + QSB_SIZE < src_bm->h)
        full_h_blend = 1;
    else
        full_h_blend = 0;

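    /* full_h_blend is 1 when a source row exists just below the sub-tile, so
     * the passes below can blend one extra row; when it is 0 the bottom edge
     * of the tile is replicated afterwards instead. */
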
    /* First horizontally blend the source bitmap into every fourth destination
     * bitmap row. */
    for (i = 0; i < QSB_SIZE + full_h_blend; i++) {
        for (j = 0; j < QSB_SIZE; j++) {
            if (u + j + 1 >= src_bm->w) {
                /* if we're at the right edge of the source bitmap, just replicate
                 * the rightmost pixel. */
                dst[0] = dst[1] = dst[2] = dst[3] = src[j];
                dst += 4;
            } else {
                int k = (src[j + 1]) | (src[j] << 8);
                dst[0] = src[j];
                dst[1] = grd_blend[k];
                dst[2] = grd_blend[k + GR_BLEND_TABLE_SIZE];
                dst[3] = grd_blend[k + 2 * GR_BLEND_TABLE_SIZE];
                dst += 4;
            }
        }
        dst += 4 * (D_ROW - QSB_SIZE);
        src += src_bm->row;
    }
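    /* At this point rows 8, 12, 16 and 20 of the buffer (plus row 24 when
     * full_h_blend is set) each hold a horizontally quadrupled source row;
     * the three rows between each pair are filled in by the vertical pass. */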

    /* Now vertically blend the destination columns. */
    dst = dst_bm->bits + (D_ROW + 1) * (D_ROW / 4);
    for (i = 0; i < QSB_SIZE + full_h_blend - 1; i++) {
        for (j = 0; j < 4 * QSB_SIZE; j++) {
            int k = (dst[j + 4 * D_ROW]) | (dst[j] << 8);
            dst[j + D_ROW] = grd_blend[k];
            dst[j + 2 * D_ROW] = grd_blend[k + GR_BLEND_TABLE_SIZE];
            dst[j + 3 * D_ROW] = grd_blend[k + 2 * GR_BLEND_TABLE_SIZE];
        }
        dst += 4 * D_ROW;
    }

    /* if we're at the bottom edge of the source bitmap, just copy the bottom row
     * 3 times. */
    if (full_h_blend == 0) {
        LG_memcpy(dst + D_ROW, dst, 4 * QSB_SIZE);
        LG_memcpy(dst + 2 * D_ROW, dst, 4 * QSB_SIZE);
        LG_memcpy(dst + 3 * D_ROW, dst, 4 * QSB_SIZE);
    }

    /* copy the top row to fill out the top of the dest. */
    dst = dst_bm->bits + (D_ROW + 1) * (D_ROW / 4);
    for (i = 0; i < D_ROW / 4; i++) {
        LG_memcpy(dst - D_ROW, dst, 4 * QSB_SIZE);
        dst -= D_ROW;
    }
    /* copy the bottom row to fill out the bottom of the dest. */
    dst = dst_bm->bits + (3 * D_ROW + 1) * (D_ROW / 4);
    for (i = 0; i < D_ROW / 4; i++) {
        LG_memcpy(dst, dst - D_ROW, 4 * QSB_SIZE);
        dst += D_ROW;
    }
    /* copy the right and left columns to fill out the right and left edges. */
    dst = dst_bm->bits;
    for (i = 0; i < D_ROW; i++) {
        memset(dst, dst[D_ROW / 4], D_ROW / 4);
        memset(dst + 3 * D_ROW / 4, dst[(3 * D_ROW / 4) - 1], D_ROW / 4);
        dst += D_ROW;
    }
}
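
/*
 * Hypothetical call site (a sketch under assumptions, not code taken from the
 * original renderer): a mapper wanting a smoothed 4x view of the 4x4 texel
 * block at (u, v) in a texture would pass a scratch grs_bitmap and read the
 * 32x32 result out of it:
 *
 *   grs_bitmap big;                                   // filled in by the call
 *   gri_flat8_hv_quadruple_sub_bitmap(&tex, &big, u, v);
 *   // big.bits == grd_sub_bm_buffer, big.w == big.h == big.row == D_ROW
 *
 * Note that the grd_last_sub_bm cache is keyed only on the source pointer and
 * is shared with gri_flat8_hv_double_sub_bitmap below, so alternating the two
 * routines on the same sub-tile returns whatever scale the buffer last held.
 */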

#define DSB_SIZE 8
#define LOG_DSB_SIZE 3

void gri_flat8_hv_double_sub_bitmap(grs_bitmap *src_bm, grs_bitmap *dst_bm, int u, int v);
void gri_flat8_hv_double_sub_bitmap(grs_bitmap *src_bm, grs_bitmap *dst_bm, int u, int v) {
    int i, j, full_h_blend;
    uchar *src, *dst;

    if (grd_log_blend_levels != 2) {
        gr_free_blend();
        gr_init_blend(2);
    }

    /* initialize destination bitmap parameters. */
    dst_bm->bits = grd_sub_bm_buffer;
    dst_bm->h = D_ROW;
    dst_bm->row = dst_bm->w = D_ROW;
    dst_bm->hlog = dst_bm->wlog = LOG_DSB_SIZE + 2;

    /* get pointer to sub bitmap bits */
    src = src_bm->bits + u + src_bm->row * v;

    /* If we just did this bitmap, no need to do it again! */
    if (src == grd_last_sub_bm)
        return;
    grd_last_sub_bm = src;

    /* Fill in the middle of the destination bitmap */
    dst = dst_bm->bits + (D_ROW + 1) * (D_ROW / 4);
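    /* Same centering as the 4x path: the doubled 16x16 tile starts 8 rows
     * down and 8 pixels in, in the middle of the 32x32 buffer. */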

    if (v + DSB_SIZE < src_bm->h)
        full_h_blend = 1;
    else
        full_h_blend = 0;

    /* First horizontally blend the source bitmap into every other destination
     * bitmap row. */
    for (i = 0; i < DSB_SIZE + full_h_blend; i++) {
        for (j = 0; j < DSB_SIZE; j++) {
            if (u + j + 1 >= src_bm->w) {
                /* if we're at the right edge of the source bitmap, just replicate
                 * the rightmost pixel. */
                dst[0] = dst[1] = src[j];
                dst += 2;
            } else {
                int k = (src[j + 1]) | (src[j] << 8);
                dst[0] = src[j];
                dst[1] = grd_half_blend[k];
                dst += 2;
            }
        }
        dst += 2 * (D_ROW - DSB_SIZE);
        src += src_bm->row;
    }

    /* Now vertically blend the destination columns. */
    dst = dst_bm->bits + (D_ROW + 1) * (D_ROW / 4);
    for (i = 0; i < DSB_SIZE + full_h_blend - 1; i++) {
        for (j = 0; j < 2 * DSB_SIZE; j++) {
            int k = (dst[j + 2 * D_ROW]) | (dst[j] << 8);
            dst[j + D_ROW] = grd_half_blend[k];
        }
        dst += 2 * D_ROW;
    }

    /* if we're at the bottom edge of the source bitmap, just copy the bottom row
     * once (4 * QSB_SIZE and 2 * DSB_SIZE are both 16 bytes). */
    if (full_h_blend == 0)
        LG_memcpy(dst + D_ROW, dst, 4 * QSB_SIZE);

    // all this is unnecessary if we're just doing linear maps.
    //   /* copy the top row to fill out the top of the dest. */
    //   dst=dst_bm->bits+(D_ROW+1)*(D_ROW/4);
    //   for (i=0;i<D_ROW/4;i++) {
    //      memcpy(dst-D_ROW,dst,2*DSB_SIZE);
    //      dst-=D_ROW;
    //   }
    //   /* copy the bottom row to fill out the bottom of the dest. */
    //   dst=dst_bm->bits+(3*D_ROW+1)*(D_ROW/4);
    //   for (i=0;i<D_ROW/4;i++) {
    //      memcpy(dst,dst-D_ROW,2*DSB_SIZE);
    //      dst+=D_ROW;
    //   }
    //   /* copy the right and left colums to fill out the right and left edges.
    //   */ dst=dst_bm->bits; for (i=0;i<D_ROW;i++) {
    //      memset(dst,dst[D_ROW/4],D_ROW/4);
    //      memset(dst+3*D_ROW/4,dst[(3*D_ROW/4)-1],D_ROW/4);
    //      dst+=D_ROW;
    //   }
}