/* K=9 r=1/3 Viterbi decoder for PowerPC G4/G5 Altivec vector instructions
 * 8-bit offset-binary soft decision samples
 * Copyright Aug 2006, Phil Karn, KA9Q
 * May be used under the terms of the GNU Lesser General Public License (LGPL)
 */
6 #include <altivec.h>
7 #include <stdio.h>
8 #include <stdlib.h>
9 #include <memory.h>
10 #include <limits.h>
11 #include "fec.h"
12
/* One bit of decision history per state (256 states = 256 bits = 32 bytes).
 * The byte view c[half][byte] and the two-vector view v[] alias the same
 * storage; the bits are stored in the interleaved order produced by the
 * ACS kernel in update_viterbi39_blk_av and unwound in chainback. */
typedef union { unsigned char c[2][16]; vector unsigned char v[2]; } decision_t;
/* 256 path metrics, one unsigned short per encoder state; also viewable
 * as 32 vectors of 8 lanes each for the Altivec butterfly. */
typedef union { unsigned short s[256]; vector unsigned short v[32]; } metric_t;

/* Branch metric tables, one per code polynomial: entry is 255 when the
 * expected symbol for that state is a one, else 0 (offset binary). */
static union branchtab39 { unsigned short s[128]; vector unsigned short v[16];} Branchtab39[3];
static int Init = 0; /* Non-zero once Branchtab39 has been initialized */
18
19 /* State info for instance of Viterbi decoder */
struct v39 {
  metric_t metrics1; /* path metric buffer 1 */
  metric_t metrics2; /* path metric buffer 2 */
  void *dp;          /* Pointer to current decision (into the decisions array) */
  metric_t *old_metrics,*new_metrics; /* Pointers to path metrics, swapped on every bit */
  void *decisions;   /* Beginning of decisions for block (decision_t array) */
};
27
28 /* Initialize Viterbi decoder for start of new frame */
/* Initialize Viterbi decoder for start of new frame.
 * p              - decoder instance from create_viterbi39_av()
 * starting_state - known initial encoder state (low 8 bits used)
 * Returns 0 on success, -1 if p is NULL.
 */
int init_viterbi39_av(void *p,int starting_state){
  struct v39 *vp = p;
  int i;

  if(vp == NULL)
    return -1;

  /* Bias every state's metric high; all 8 lanes must be written explicitly.
   * A bare {1000} initializer sets only lane 0 and zero-fills the rest,
   * which would leave 7 of every 8 states with a zero (i.e. best) metric. */
  for(i=0;i<32;i++)
    vp->metrics1.v[i] = (vector unsigned short){1000,1000,1000,1000,1000,1000,1000,1000};

  vp->old_metrics = &vp->metrics1;
  vp->new_metrics = &vp->metrics2;
  vp->dp = vp->decisions;
  vp->old_metrics->s[starting_state & 255] = 0; /* Bias known start state */
  return 0;
}
42
set_viterbi39_polynomial_av(int polys[3])43 void set_viterbi39_polynomial_av(int polys[3]){
44 int state;
45
46 for(state=0;state < 128;state++){
47 Branchtab39[0].s[state] = (polys[0] < 0) ^ parity((2*state) & abs(polys[0])) ? 255 : 0;
48 Branchtab39[1].s[state] = (polys[1] < 0) ^ parity((2*state) & abs(polys[1])) ? 255 : 0;
49 Branchtab39[2].s[state] = (polys[2] < 0) ^ parity((2*state) & abs(polys[2])) ? 255 : 0;
50 }
51 Init++;
52 }
53
54 /* Create a new instance of a Viterbi decoder */
create_viterbi39_av(int len)55 void *create_viterbi39_av(int len){
56 struct v39 *vp;
57
58 if(!Init){
59 int polys[3] = { V39POLYA, V39POLYB, V39POLYC };
60
61 set_viterbi39_polynomial_av(polys);
62 }
63 vp = (struct v39 *)malloc(sizeof(struct v39));
64 vp->decisions = malloc(sizeof(decision_t)*(len+8));
65 init_viterbi39_av(vp,0);
66 return vp;
67 }
68
69 /* Viterbi chainback */
/* Viterbi chainback: walk the stored decisions backwards from the known
 * terminal state, reconstructing the decoded data bits.
 * p        - decoder instance
 * data     - decoded output data, packed MSB-first, nbits/8 bytes
 * nbits    - number of data bits to recover
 * endstate - terminal encoder state (only the low 8 bits are used)
 * Returns the path metric of the terminal state, or -1 if p is NULL.
 */
int chainback_viterbi39_av(
      void *p,
      unsigned char *data,    /* Decoded output data */
      unsigned int nbits,     /* Number of data bits */
      unsigned int endstate){ /* Terminal encoder state */
  struct v39 *vp = p;
  decision_t *d;
  int path_metric;

  if(p == NULL)
    return -1;
  d = (decision_t *)vp->decisions;

  endstate %= 256; /* Only 256 states for K=9 with an 8-bit chainback window */

  path_metric = vp->old_metrics->s[endstate];

  /* The store into data[] only needs to be done every 8 bits.
   * But this avoids a conditional branch, and the writes will
   * combine in the cache anyway
   */
  d += 8; /* Look past tail */
  while(nbits-- != 0){
    int k;

    /* Extract this state's decision bit from the interleaved packing
     * written by update_viterbi39_blk_av: state bit 7 selects the
     * vector half, bits 0-3 the byte, bits 4-6 the bit within it. */
    k = (d[nbits].c[endstate >> 7][endstate & 15] & (0x80 >> ((endstate>>4)&7)) ) ? 1 : 0;
    /* Shift the decision bit into the state; after 8 steps the high byte
     * of endstate holds a full decoded byte. */
    endstate = (k << 7) | (endstate >> 1);
    data[nbits>>3] = endstate;
  }
  return path_metric;
}
100
101 /* Delete instance of a Viterbi decoder */
delete_viterbi39_av(void * p)102 void delete_viterbi39_av(void *p){
103 struct v39 *vp = p;
104
105 if(vp != NULL){
106 free(vp->decisions);
107 free(vp);
108 }
109 }
110
/* Run the Altivec add-compare-select butterfly over a block of symbols.
 * p     - decoder instance
 * syms  - 3*nbits soft symbols, 8-bit offset binary
 * nbits - number of encoded data bits to process
 * Returns the total amount subtracted during renormalization (a lower
 * bound on the winning path metric), or -1 if p is NULL.
 */
int update_viterbi39_blk_av(void *p,unsigned char *syms,int nbits){
  struct v39 *vp = p;
  decision_t *d;
  int path_metric = 0;
  vector unsigned char decisions = (vector unsigned char){0};

  if(p == NULL)
    return -1;
  d = (decision_t *)vp->dp;

  while(nbits--){
    vector unsigned short symv,sym0v,sym1v,sym2v;
    vector unsigned char s;
    void *tmp;
    int i;

    /* Splat the 0th symbol across sym0v, the 1st symbol across sym1v, etc.
     * The perm of two aligned loads handles an unaligned syms pointer. */
    s = (vector unsigned char)vec_perm(vec_ld(0,syms),vec_ld(5,syms),vec_lvsl(0,syms));

    symv = (vector unsigned short)vec_mergeh((vector unsigned char){0},s); /* Unsigned byte->word unpack */
    sym0v = vec_splat(symv,0);
    sym1v = vec_splat(symv,1);
    sym2v = vec_splat(symv,2);
    syms += 3;

    for(i=0;i<16;i++){
      vector bool short decision0,decision1;
      vector unsigned short metric,m_metric,m0,m1,m2,m3,survivor0,survivor1;

      /* Form branch metrics
       * Because Branchtab takes on values 0 and 255, and the values of sym?v are offset binary in the range 0-255,
       * the XOR operations constitute conditional negation.
       * the metrics are in the range 0-765
       */
      m0 = vec_add(vec_xor(Branchtab39[0].v[i],sym0v),vec_xor(Branchtab39[1].v[i],sym1v));
      m1 = vec_xor(Branchtab39[2].v[i],sym2v);
      metric = vec_add(m0,m1);
      /* Complementary metric = 765 - metric. All 8 lanes must hold 765;
       * a bare {765} initializer would zero-fill lanes 1-7 and corrupt
       * the complementary branch metrics for those lanes. */
      m_metric = vec_sub((vector unsigned short){765,765,765,765,765,765,765,765},metric);

      /* Add branch metrics to path metrics */
      m0 = vec_adds(vp->old_metrics->v[i],metric);
      m3 = vec_adds(vp->old_metrics->v[16+i],metric);
      m1 = vec_adds(vp->old_metrics->v[16+i],m_metric);
      m2 = vec_adds(vp->old_metrics->v[i],m_metric);

      /* Compare and select */
      decision0 = vec_cmpgt(m0,m1);
      decision1 = vec_cmpgt(m2,m3);
      survivor0 = vec_min(m0,m1);
      survivor1 = vec_min(m2,m3);

      /* Store decisions and survivors.
       * To save space without SSE2's handy PMOVMSKB instruction, we pack and store them in
       * a funny interleaved fashion that we undo in the chainback function.
       */
      decisions = vec_add(decisions,decisions); /* Shift each byte 1 bit to the left */

      /* Booleans are either 0xff or 0x00. Subtracting 0x00 leaves the lsb zero; subtracting
       * 0xff is equivalent to adding 1, which sets the lsb.
       */
      decisions = vec_sub(decisions,(vector unsigned char)vec_pack(vec_mergeh(decision0,decision1),vec_mergel(decision0,decision1)));

      vp->new_metrics->v[2*i] = vec_mergeh(survivor0,survivor1);
      vp->new_metrics->v[2*i+1] = vec_mergel(survivor0,survivor1);

      if((i % 8) == 7){
        /* We've accumulated a total of 128 decisions, stash and start again */
        d->v[i>>3] = decisions; /* No need to clear, the new bits will replace the old */
      }
    }
#if 0
    /* Experimentally determine metric spread
     * The results are fixed for a given code and input symbol size
     */
    {
      int i;
      vector unsigned short min_metric;
      vector unsigned short max_metric;
      union { vector unsigned short v; unsigned short s[8];} t;
      int minimum,maximum;
      static int max_spread = 0;

      min_metric = max_metric = vp->new_metrics->v[0];
      for(i=1;i<32;i++){
        min_metric = vec_min(min_metric,vp->new_metrics->v[i]);
        max_metric = vec_max(max_metric,vp->new_metrics->v[i]);
      }
      /* Horizontal min/max by folding the vector in half three times */
      min_metric = vec_min(min_metric,vec_sld(min_metric,min_metric,8));
      max_metric = vec_max(max_metric,vec_sld(max_metric,max_metric,8));
      min_metric = vec_min(min_metric,vec_sld(min_metric,min_metric,4));
      max_metric = vec_max(max_metric,vec_sld(max_metric,max_metric,4));
      min_metric = vec_min(min_metric,vec_sld(min_metric,min_metric,2));
      max_metric = vec_max(max_metric,vec_sld(max_metric,max_metric,2));

      t.v = min_metric;
      minimum = t.s[0];
      t.v = max_metric;
      maximum = t.s[0];
      if(maximum-minimum > max_spread){
        max_spread = maximum-minimum;
        printf("metric spread = %d\n",max_spread);
      }
    }
#endif

    /* Renormalize if necessary. This deserves some explanation.
     * The maximum possible spread, found by experiment, for 8 bit symbols is about 3825
     * So by looking at one arbitrary metric we can tell if any of them have possibly saturated.
     * However, this is very conservative. Large spreads occur only at very high Eb/No, where
     * saturating a bad path metric doesn't do much to increase its chances of being erroneously chosen as a survivor.

     * At more interesting (low) Eb/No ratios, the spreads are much smaller so our chances of saturating a metric
     * by not normalizing when we should are extremely low. So either way, the risk to performance is small.

     * All this is borne out by experiment.
     */
    if(vp->new_metrics->s[0] >= USHRT_MAX-5000){
      vector unsigned short scale;
      union { vector unsigned short v; unsigned short s[8];} t;

      /* Find smallest metric and splat */
      scale = vp->new_metrics->v[0];
      for(i=1;i<32;i++)
        scale = vec_min(scale,vp->new_metrics->v[i]);

      scale = vec_min(scale,vec_sld(scale,scale,8));
      scale = vec_min(scale,vec_sld(scale,scale,4));
      scale = vec_min(scale,vec_sld(scale,scale,2));

      /* Subtract it from all metrics
       * Work backwards to try to improve the cache hit ratio, assuming LRU
       */
      for(i=31;i>=0;i--)
        vp->new_metrics->v[i] = vec_subs(vp->new_metrics->v[i],scale);
      t.v = scale;
      path_metric += t.s[0]; /* Track total renormalization for the caller */
    }
    d++;
    /* Swap pointers to old and new metrics */
    tmp = vp->old_metrics;
    vp->old_metrics = vp->new_metrics;
    vp->new_metrics = tmp;
  }
  vp->dp = d;
  return path_metric;
}
253