@ ScummVM - Graphic Adventure Engine
@
@ ScummVM is the legal property of its developers, whose names
@ are too numerous to list here. Please refer to the COPYRIGHT
@ file distributed with this source distribution.
@
@ This program is free software; you can redistribute it and/or
@ modify it under the terms of the GNU General Public License
@ as published by the Free Software Foundation; either version 2
@ of the License, or (at your option) any later version.
@
@ This program is distributed in the hope that it will be useful,
@ but WITHOUT ANY WARRANTY; without even the implied warranty of
@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
@ GNU General Public License for more details.
@
@ You should have received a copy of the GNU General Public License
@ along with this program; if not, write to the Free Software
@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
@
@ @author Robin Watts (robin@wss.co.uk)

	@ For 16 source pixels 0123456789ABCDEF, we want to produce 11 output
	@ pixels.

	@0000000000011111111111222222222223333333333344444444444555555555
	@<------||------><------||------><------||------><------||------>

	@5566666666666777777777778888888888899999999999AAAAAAAAAAABBBBBBB
	@<------||------><------||------><------||------><------||------>

	@BBBBCCCCCCCCCCCDDDDDDDDDDDEEEEEEEEEEEFFFFFFFFFFF
	@<------||------><------||------><------||------>

	@ So, use the following weights (approximately right)

	@ d0 = (3*s0 + 1*s1)>>2                Every source pixel constitutes
	@ d1 = (2*s1 + 2*s2)>>2                3/4 of a destination pixel,
	@ d2 = (1*s2 + 3*s3)>>2                except for s4,s5,sA and sB which
	@ d3 = (2*s4 + 2*s5)>>2                constitute 1/2 each.
	@ d4 = (3*s6 + 1*s7)>>2
	@ d5 = (2*s7 + 2*s8)>>2
	@ d6 = (1*s8 + 3*s9)>>2
	@ d7 = (2*sA + 2*sB)>>2
	@ d8 = (3*sC + 1*sD)>>2
	@ d9 = (2*sD + 2*sE)>>2
	@ dA = (1*sE + 3*sF)>>2
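
	@ In C terms, one 16-pixel group could be produced roughly as below.
	@ This is only an illustrative sketch (the real code below works on
	@ packed pixels split up with 'mask', see the note before y_loop);
	@ the helper 'blend' is hypothetical and mixes each channel separately,
	@ with blend(a,b,w) = (w*a + (4-w)*b) >> 2 applied per channel:
	@
	@   dst[0]  = blend(src[0],  src[1],  3);   /* d0 = (3*s0 + 1*s1)>>2 */
	@   dst[1]  = blend(src[1],  src[2],  2);   /* d1 = (2*s1 + 2*s2)>>2 */
	@   dst[2]  = blend(src[2],  src[3],  1);   /* d2 = (1*s2 + 3*s3)>>2 */
	@   dst[3]  = blend(src[4],  src[5],  2);   /* d3 */
	@   dst[4]  = blend(src[6],  src[7],  3);   /* d4 */
	@   dst[5]  = blend(src[7],  src[8],  2);   /* d5 */
	@   dst[6]  = blend(src[8],  src[9],  1);   /* d6 */
	@   dst[7]  = blend(src[10], src[11], 2);   /* d7 */
	@   dst[8]  = blend(src[12], src[13], 3);   /* d8 */
	@   dst[9]  = blend(src[13], src[14], 2);   /* d9 */
	@   dst[10] = blend(src[14], src[15], 1);   /* dA */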

	.text

	.global	SmartphoneLandscapeARM

	@ Scales a width x height block of 16bpp pixels from srcPtr to dstPtr,
	@ scaling each scanline down by 11/16ths. Every 8th scanline is dropped.
	@ srcSpan and dstSpan are the byte strides used to reach subsequent lines.
	@ mask allows one routine to handle both 565 and 555 pixel formats.
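	@
	@ From C the entry point might be declared and called like this (a sketch
	@ only; the prototype, typedef names and mask values are assumptions, not
	@ taken from this file - the masks are the usual split-channel constants
	@ for each format):
	@
	@   extern "C" void SmartphoneLandscapeARM(const uint16 *srcPtr, uint32 srcSpan,
	@                                          uint16 *dstPtr, uint32 dstSpan,
	@                                          uint32 width, uint32 height,
	@                                          uint32 mask);
	@
	@   SmartphoneLandscapeARM(src, srcPitchInBytes, dst, dstPitchInBytes,
	@                          w, h, 0x07E0F81F);   /* RGB565 */
	@   /* for RGB555 the equivalent mask would be 0x03E07C1F */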

SmartphoneLandscapeARM:
	@ r0 = srcPtr
	@ r1 = srcSpan
	@ r2 = dstPtr
	@ r3 = dstSpan
	@ <> = width   (on the stack)
	@ <> = height  (on the stack)
	@ <> = mask    (on the stack)
	MOV	r12,r13			@ r12 -> stacked arguments
	STMFD	r13!,{r4-r11,r14}	@ save callee-saved regs + return address
	LDMFD	r12,{r4,r5,r11}		@ r4 = width
					@ r5 = height
					@ r11= mask
	MOV	r7, #7			@ r7 = line
	SUB	r8, r1, r4, LSL #1	@ r8 = srcSpan - width*2
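	@ Each pass round y_loop converts one scanline; after every 7 scanlines
	@ one extra source line is skipped, giving the "every 8th scanline is
	@ dropped" behaviour. Roughly, in C (a sketch only; spans are byte
	@ strides, so the pointers are treated as byte pointers here, and
	@ scale_row_16_to_11 is a hypothetical helper for the x_loop below):
	@
	@   int line = 7;
	@   while (height > 0) {
	@       scale_row_16_to_11(dst, src, width, mask);
	@       src += srcSpan;
	@       dst += dstSpan;
	@       if (--line == 0) {          /* every 8th source line is dropped */
	@           src += srcSpan;
	@           line = 7;
	@           height--;
	@       }
	@       height--;
	@   }
	@
	@ Within x_loop each pixel pair is mixed in the split 'g_b_r' form: the
	@ 16-bit pixel is duplicated into both halves of a register and ANDed
	@ with 'mask' so that green sits alone in the top half and red+blue in
	@ the bottom half. The weighted sum can then be done on all three
	@ channels at once without the fields carrying into each other.
	@ For d0, for example, the equivalent C would be:
	@
	@   uint32 a = (s0 | (s0 << 16)) & mask;       /* s0 as g_b_r      */
	@   uint32 b = (s1 | (s1 << 16)) & mask;       /* s1 as g_b_r      */
	@   uint32 d = ((3 * a + b) >> 2) & mask;      /* d0 as g_b_r      */
	@   uint16 d0 = (uint16)(d | (d >> 16));       /* recombine halves */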
y_loop:
	MOV	r6, r4			@ r6 = i
	MOV	r9, r2			@ r9 = dstPtr
x_loop:
	LDRH	r14,[r0],#2		@ r14 = s0
	LDRH	r12,[r0],#2		@ r12 = s1
	LDRH	r10,[r0],#2		@ r10 = s2
	ORR	r14,r14,r14,LSL #16	@ r14 = s0s0
	ORR	r12,r12,r12,LSL #16	@ r12 = s1s1
	AND	r14,r14,r11		@ r14 = s0 as g_b_r
	AND	r12,r12,r11		@ r12 = s1 as g_b_r
	ADD	r14,r14,r14,LSL #1	@ r14 = s0*3 as g_b_r
	ORR	r10,r10,r10,LSL #16	@ r10 = s2s2
	ADD	r14,r14,r12		@ r14 = (s0*3 + s1) as g_b_r
	AND	r10,r10,r11		@ r10 = s2 as g_b_r
	AND	r14,r11,r14,LSR #2	@ r14 = d0 as g_b_r
	ORR	r14,r14,r14,LSR #16	@ r14 = d0
	STRH	r14,[r9],#2		@ store d0
	ADD	r12,r12,r10		@ r12 = (s1 + s2) as g_b_r
	LDRH	r14,[r0],#2		@ r14 = s3
	AND	r12,r11,r12,LSR #1	@ r12 = d1 as g_b_r
	ORR	r12,r12,r12,LSR #16	@ r12 = d1
	STRH	r12,[r9],#2		@ store d1
	ORR	r14,r14,r14,LSL #16	@ r14 = s3s3
	AND	r14,r14,r11		@ r14 = s3 as g_b_r
	ADD	r10,r10,r14		@ r10 = (s2 + s3) as g_b_r
	ADD	r10,r10,r14,LSL #1	@ r10 = (s2 + s3*3) as g_b_r
	LDRH	r14,[r0],#2		@ r14 = s4
	LDRH	r12,[r0],#2		@ r12 = s5
	AND	r10,r11,r10,LSR #2	@ r10 = d2 as g_b_r
	ORR	r10,r10,r10,LSR #16	@ r10 = d2
	STRH	r10,[r9],#2		@ store d2
	ORR	r14,r14,r14,LSL #16	@ r14 = s4s4
	ORR	r12,r12,r12,LSL #16	@ r12 = s5s5
	AND	r14,r14,r11		@ r14 = s4 as g_b_r
	AND	r12,r12,r11		@ r12 = s5 as g_b_r
	ADD	r14,r14,r12		@ r14 = (s4 + s5) as g_b_r
	LDRH	r12,[r0],#2		@ r12 = s6
	LDRH	r10,[r0],#2		@ r10 = s7
	AND	r14,r11,r14,LSR #1	@ r14 = d3 as g_b_r
	ORR	r14,r14,r14,LSR #16	@ r14 = d3
	STRH	r14,[r9],#2		@ store d3
	ORR	r12,r12,r12,LSL #16	@ r12 = s6s6
	ORR	r10,r10,r10,LSL #16	@ r10 = s7s7
	LDRH	r14,[r0],#2		@ r14 = s8
	AND	r12,r12,r11		@ r12 = s6 as g_b_r
	AND	r10,r10,r11		@ r10 = s7 as g_b_r
	ORR	r14,r14,r14,LSL #16	@ r14 = s8s8
	ADD	r12,r12,r12,LSL #1	@ r12 = 3*s6 as g_b_r
	AND	r14,r14,r11		@ r14 = s8 as g_b_r
	ADD	r12,r12,r10		@ r12 = (3*s6+s7) as g_b_r
	AND	r12,r11,r12,LSR #2	@ r12 = d4 as g_b_r
	ORR	r12,r12,r12,LSR #16	@ r12 = d4
	STRH	r12,[r9],#2		@ store d4
	ADD	r10,r10,r14		@ r10 = (s7+s8) as g_b_r
	AND	r10,r11,r10,LSR #1	@ r10 = d5 as g_b_r
	LDRH	r12,[r0],#2		@ r12 = s9
	ORR	r10,r10,r10,LSR #16	@ r10 = d5
	STRH	r10,[r9],#2		@ store d5
	ORR	r12,r12,r12,LSL #16	@ r12 = s9s9
	AND	r12,r12,r11		@ r12 = s9 as g_b_r
	ADD	r12,r12,r12,LSL #1	@ r12 = s9*3 as g_b_r
	ADD	r12,r12,r14		@ r12 = (s8+s9*3) as g_b_r
	AND	r12,r11,r12,LSR #2	@ r12 = d6 as g_b_r
	LDRH	r14,[r0],#2		@ r14 = sA
	LDRH	r10,[r0],#2		@ r10 = sB
	ORR	r12,r12,r12,LSR #16	@ r12 = d6
	STRH	r12,[r9],#2		@ store d6
	ORR	r14,r14,r14,LSL #16	@ r14 = sAsA
	ORR	r10,r10,r10,LSL #16	@ r10 = sBsB
	LDRH	r12,[r0],#2		@ r12 = sC
	AND	r14,r14,r11		@ r14 = sA as g_b_r
	AND	r10,r10,r11		@ r10 = sB as g_b_r
	ORR	r12,r12,r12,LSL #16	@ r12 = sCsC
	ADD	r14,r14,r10		@ r14 = (sA + sB) as g_b_r
	LDRH	r10,[r0],#2		@ r10 = sD
	AND	r14,r11,r14,LSR #1	@ r14 = d7 as g_b_r
	AND	r12,r12,r11		@ r12 = sC as g_b_r
	ORR	r14,r14,r14,LSR #16	@ r14 = d7
	ORR	r10,r10,r10,LSL #16	@ r10 = sDsD
	STRH	r14,[r9],#2		@ store d7
	AND	r10,r10,r11		@ r10 = sD as g_b_r
	ADD	r12,r12,r12,LSL #1	@ r12 = 3*sC as g_b_r
	LDRH	r14,[r0],#2		@ r14 = sE
	ADD	r12,r12,r10		@ r12 = (3*sC+sD) as g_b_r
	AND	r12,r11,r12,LSR #2	@ r12 = d8 as g_b_r
	ORR	r14,r14,r14,LSL #16	@ r14 = sEsE
	ORR	r12,r12,r12,LSR #16	@ r12 = d8
	AND	r14,r14,r11		@ r14 = sE as g_b_r
	STRH	r12,[r9],#2		@ store d8
	ADD	r10,r10,r14		@ r10 = (sD+sE) as g_b_r
	LDRH	r12,[r0],#2		@ r12 = sF
	AND	r10,r11,r10,LSR #1	@ r10 = d9 as g_b_r
	ORR	r10,r10,r10,LSR #16	@ r10 = d9
	STRH	r10,[r9],#2		@ store d9
	ORR	r12,r12,r12,LSL #16	@ r12 = sFsF
	AND	r12,r12,r11		@ r12 = sF as g_b_r
	ADD	r12,r12,r12,LSL #1	@ r12 = 3*sF as g_b_r
	ADD	r12,r12,r14		@ r12 = (sE+3*sF) as g_b_r
	AND	r12,r11,r12,LSR #2	@ r12 = dA as g_b_r
	ORR	r12,r12,r12,LSR #16	@ r12 = dA
	SUBS	r6,r6,#16		@ i -= 16
	STRH	r12,[r9],#2		@ store dA
	BGT	x_loop

	ADD	r0, r0, r8		@ srcPtr += srcSpan - width*2 (next src line)
	ADD	r2, r2, r3		@ dstPtr += dstSpan
	SUBS	r7, r7, #1		@ line--
	ADDEQ	r0, r0, r1		@ if (line==0) skip one src line,
	MOVEQ	r7, #7			@               reset line to 7
	SUBEQ	r5, r5, #1		@               and count the skipped line
	SUBS	r5, r5, #1		@ height--
	BGT	y_loop

	LDMFD	r13!,{r4-r11,PC}	@ restore regs and return