1 /******************************************************************************
2 
3  � 1995-2003, 2004, 2005-2011 Freescale Semiconductor, Inc.
4  All rights reserved.
5 
6  This is proprietary source code of Freescale Semiconductor Inc.,
7  and its use is subject to the NetComm Device Drivers EULA.
8  The copyright notice above does not evidence any actual or intended
9  publication of such source code.
10 
11  ALTERNATIVELY, redistribution and use in source and binary forms, with
12  or without modification, are permitted provided that the following
13  conditions are met:
14      * Redistributions of source code must retain the above copyright
15        notice, this list of conditions and the following disclaimer.
16      * Redistributions in binary form must reproduce the above copyright
17        notice, this list of conditions and the following disclaimer in the
18        documentation and/or other materials provided with the distribution.
19      * Neither the name of Freescale Semiconductor nor the
20        names of its contributors may be used to endorse or promote products
21        derived from this software without specific prior written permission.
22 
23  THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24  EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25  WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26  DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28  (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  *
34 
35  **************************************************************************/
36 /******************************************************************************
37  @File          qm.c
38 
39  @Description   QM & Portal implementation
40 *//***************************************************************************/
41 #include "error_ext.h"
42 #include "std_ext.h"
43 #include "string_ext.h"
44 #include "mm_ext.h"
45 #include "qm.h"
46 #include "qman_low.h"
47 
48 
49 /****************************************/
50 /*       static functions               */
51 /****************************************/
52 
/* Poll-interval constants for the portal slow-poll path: back off when idle,
 * poll more frequently while busy. Units are presumably timer ticks used by
 * the poll scheduler -- TODO confirm against the (not visible) polling code. */
#define SLOW_POLL_IDLE   1000
#define SLOW_POLL_BUSY   10
55 
56 
/**
 * @brief   Issue a volatile dequeue command (VDQCR) for a parked/retired FQ.
 *
 * Merges the FQ's id into the caller-supplied VDQCR word and writes it to the
 * portal's VDQCR register. The FQ must not already have a VDQCR outstanding
 * (asserted below); completion is signalled later via the DQRR "expired" stat.
 *
 * @param p_QmPortal  Portal used to issue the command.
 * @param p_Fq        FQ to dequeue from; must be parked or retired.
 * @param vdqcr       VDQCR option bits; the FQID field must be zero on entry.
 *
 * @return  E_OK always (the command itself is fire-and-forget).
 */
static t_Error qman_volatile_dequeue(t_QmPortal     *p_QmPortal,
                                     struct qman_fq *p_Fq,
                                     uint32_t       vdqcr)
{
    ASSERT_COND((p_Fq->state == qman_fq_state_parked) ||
                (p_Fq->state == qman_fq_state_retired));
    ASSERT_COND(!(vdqcr & QM_VDQCR_FQID_MASK));
    ASSERT_COND(!(p_Fq->flags & QMAN_FQ_STATE_VDQCR));

    /* Stamp this FQ's id into the command word. */
    vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | p_Fq->fqid;
    NCSW_PLOCK(p_QmPortal);
    FQLOCK(p_Fq);
    /* Mark the VDQCR as outstanding before writing the register, so the
     * DQRR consumer sees the flag when the completion arrives. */
    p_Fq->flags |= QMAN_FQ_STATE_VDQCR;
    qm_dqrr_vdqcr_set(p_QmPortal->p_LowQmPortal, vdqcr);
    FQUNLOCK(p_Fq);
    PUNLOCK(p_QmPortal);

    return E_OK;
}
76 
77 static const char *mcr_result_str(uint8_t result)
78 {
79     switch (result) {
80     case QM_MCR_RESULT_NULL:
81         return "QM_MCR_RESULT_NULL";
82     case QM_MCR_RESULT_OK:
83         return "QM_MCR_RESULT_OK";
84     case QM_MCR_RESULT_ERR_FQID:
85         return "QM_MCR_RESULT_ERR_FQID";
86     case QM_MCR_RESULT_ERR_FQSTATE:
87         return "QM_MCR_RESULT_ERR_FQSTATE";
88     case QM_MCR_RESULT_ERR_NOTEMPTY:
89         return "QM_MCR_RESULT_ERR_NOTEMPTY";
90     case QM_MCR_RESULT_PENDING:
91         return "QM_MCR_RESULT_PENDING";
92     }
93     return "<unknown MCR result>";
94 }
95 
96 static t_Error qman_create_fq(t_QmPortal        *p_QmPortal,
97                               uint32_t          fqid,
98                               uint32_t          flags,
99                               struct qman_fq    *p_Fq)
100 {
101     struct qm_fqd fqd;
102     struct qm_mcr_queryfq_np np;
103     struct qm_mc_command *p_Mcc;
104     struct qm_mc_result *p_Mcr;
105 
106     p_Fq->fqid = fqid;
107     p_Fq->flags = flags;
108     p_Fq->state = qman_fq_state_oos;
109     p_Fq->cgr_groupid = 0;
110     if (!(flags & QMAN_FQ_FLAG_RECOVER) ||
111             (flags & QMAN_FQ_FLAG_NO_MODIFY))
112         return E_OK;
113     /* Everything else is RECOVER support */
114     NCSW_PLOCK(p_QmPortal);
115     p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
116     p_Mcc->queryfq.fqid = fqid;
117     qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYFQ);
118     while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
119     ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ);
120     if (p_Mcr->result != QM_MCR_RESULT_OK) {
121         PUNLOCK(p_QmPortal);
122         RETURN_ERROR(MAJOR, E_INVALID_STATE, ("QUERYFQ failed: %s", mcr_result_str(p_Mcr->result)));
123     }
124     fqd = p_Mcr->queryfq.fqd;
125     p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
126     p_Mcc->queryfq_np.fqid = fqid;
127     qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYFQ_NP);
128     while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
129     ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP);
130     if (p_Mcr->result != QM_MCR_RESULT_OK) {
131         PUNLOCK(p_QmPortal);
132         RETURN_ERROR(MAJOR, E_INVALID_STATE, ("UERYFQ_NP failed: %s", mcr_result_str(p_Mcr->result)));
133     }
134     np = p_Mcr->queryfq_np;
135     /* Phew, have queryfq and queryfq_np results, stitch together
136      * the FQ object from those. */
137     p_Fq->cgr_groupid = fqd.cgid;
138     switch (np.state & QM_MCR_NP_STATE_MASK) {
139     case QM_MCR_NP_STATE_OOS:
140         break;
141     case QM_MCR_NP_STATE_RETIRED:
142         p_Fq->state = qman_fq_state_retired;
143         if (np.frm_cnt)
144             p_Fq->flags |= QMAN_FQ_STATE_NE;
145         break;
146     case QM_MCR_NP_STATE_TEN_SCHED:
147     case QM_MCR_NP_STATE_TRU_SCHED:
148     case QM_MCR_NP_STATE_ACTIVE:
149         p_Fq->state = qman_fq_state_sched;
150         if (np.state & QM_MCR_NP_STATE_R)
151             p_Fq->flags |= QMAN_FQ_STATE_CHANGING;
152         break;
153     case QM_MCR_NP_STATE_PARKED:
154         p_Fq->state = qman_fq_state_parked;
155         break;
156     default:
157         ASSERT_COND(FALSE);
158     }
159     if (fqd.fq_ctrl & QM_FQCTRL_CGE)
160         p_Fq->state |= QMAN_FQ_STATE_CGR_EN;
161     PUNLOCK(p_QmPortal);
162 
163     return E_OK;
164 }
165 
/**
 * @brief   Destroy (sanity-check) a quiesced FQ object.
 *
 * No teardown work is needed here; the FQ must already be OOS, or parked
 * with QMAN_FQ_DESTROY_PARKED passed in @p flags. Any other state trips the
 * ASSERT at the bottom.
 */
static void qman_destroy_fq(struct qman_fq *p_Fq, uint32_t flags)
{
    /* We don't need to lock the FQ as it is a pre-condition that the FQ be
     * quiesced. Instead, run some checks. */
    UNUSED(flags);
    switch (p_Fq->state) {
    case qman_fq_state_parked:
        ASSERT_COND(flags & QMAN_FQ_DESTROY_PARKED);
        /* fallthrough: parked (with the destroy flag) is as acceptable as OOS */
    case qman_fq_state_oos:
        return;
    default:
        break;
    }
    /* Reached only for sched/retired/etc. -- destroying a live FQ is a bug. */
    ASSERT_COND(FALSE);
}
181 
/**
 * @brief   Issue an INITFQ_[PARKED|SCHED] management command for an FQ.
 *
 * Copies the caller's init options into the MC command ring, commits the
 * command, polls for the response, and on success updates the software FQ
 * object's CGR-enable flag, CGR group id, and state.
 *
 * @param p_QmPortal  Portal used for the command.
 * @param p_Fq        FQ to initialize; must be OOS or parked and not changing.
 * @param flags       QMAN_INITFQ_FLAG_SCHED selects the SCHED verb.
 * @param p_Opts      Init-FQ options (write-enable mask plus FQD fields).
 *
 * @return  E_OK on success; E_INVALID_VALUE for NO_MODIFY FQs; E_BUSY if the
 *          FQ is mid-transition; E_INVALID_STATE if hardware rejects it.
 */
static t_Error qman_init_fq(t_QmPortal          *p_QmPortal,
                            struct qman_fq      *p_Fq,
                            uint32_t            flags,
                            struct qm_mcc_initfq *p_Opts)
{
    struct qm_mc_command    *p_Mcc;
    struct qm_mc_result     *p_Mcr;
    uint8_t res, myverb = (uint8_t)((flags & QMAN_INITFQ_FLAG_SCHED) ?
        QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED);

    SANITY_CHECK_RETURN_ERROR((p_Fq->state == qman_fq_state_oos) ||
                              (p_Fq->state == qman_fq_state_parked),
                              E_INVALID_STATE);

    if (p_Fq->flags & QMAN_FQ_FLAG_NO_MODIFY)
        return ERROR_CODE(E_INVALID_VALUE);
    /* Issue an INITFQ_[PARKED|SCHED] management command */
    NCSW_PLOCK(p_QmPortal);
    FQLOCK(p_Fq);
    /* Re-check state under the locks: it may have changed since the
     * unlocked sanity check above. */
    if ((p_Fq->flags & QMAN_FQ_STATE_CHANGING) ||
            ((p_Fq->state != qman_fq_state_oos) &&
                (p_Fq->state != qman_fq_state_parked))) {
        FQUNLOCK(p_Fq);
        PUNLOCK(p_QmPortal);
        return ERROR_CODE(E_BUSY);
    }
    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
    Mem2IOCpy32((void*)&p_Mcc->initfq, p_Opts, sizeof(struct qm_mcc_initfq));
    qm_mc_commit(p_QmPortal->p_LowQmPortal, myverb);
    /* Busy-wait for the MC response (portal is locked for the duration). */
    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == myverb);
    res = p_Mcr->result;
    if (res != QM_MCR_RESULT_OK) {
        FQUNLOCK(p_Fq);
        PUNLOCK(p_QmPortal);
        RETURN_ERROR(MINOR, E_INVALID_STATE,("INITFQ failed: %s", mcr_result_str(res)));
    }

    /* Mirror the accepted options into the software object. Note this reads
     * back from the command ring entry after the response was consumed --
     * presumably still valid at this point; verify against qman_low. */
    if (p_Mcc->initfq.we_mask & QM_INITFQ_WE_FQCTRL) {
        if (p_Mcc->initfq.fqd.fq_ctrl & QM_FQCTRL_CGE)
            p_Fq->flags |= QMAN_FQ_STATE_CGR_EN;
        else
            p_Fq->flags &= ~QMAN_FQ_STATE_CGR_EN;
    }
    if (p_Mcc->initfq.we_mask & QM_INITFQ_WE_CGID)
        p_Fq->cgr_groupid = p_Mcc->initfq.fqd.cgid;
    p_Fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
            qman_fq_state_sched : qman_fq_state_parked;
    FQUNLOCK(p_Fq);
    PUNLOCK(p_QmPortal);
    return E_OK;
}
234 
235 static t_Error qman_retire_fq(t_QmPortal        *p_QmPortal,
236                               struct qman_fq    *p_Fq,
237                               uint32_t          *p_Flags,
238                               bool              drain)
239 {
240     struct qm_mc_command    *p_Mcc;
241     struct qm_mc_result     *p_Mcr;
242     t_Error                 err = E_OK;
243     uint8_t                 res;
244 
245     SANITY_CHECK_RETURN_ERROR((p_Fq->state == qman_fq_state_parked) ||
246                               (p_Fq->state == qman_fq_state_sched),
247                               E_INVALID_STATE);
248 
249     if (p_Fq->flags & QMAN_FQ_FLAG_NO_MODIFY)
250         return E_INVALID_VALUE;
251     NCSW_PLOCK(p_QmPortal);
252     FQLOCK(p_Fq);
253     if ((p_Fq->flags & QMAN_FQ_STATE_CHANGING) ||
254             (p_Fq->state == qman_fq_state_retired) ||
255                 (p_Fq->state == qman_fq_state_oos)) {
256         err = E_BUSY;
257         goto out;
258     }
259     p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
260     p_Mcc->alterfq.fqid = p_Fq->fqid;
261     if (drain)
262         p_Mcc->alterfq.context_b = (uint32_t)PTR_TO_UINT(p_Fq);
263     qm_mc_commit(p_QmPortal->p_LowQmPortal,
264                  (uint8_t)((drain)?QM_MCC_VERB_ALTER_RETIRE_CTXB:QM_MCC_VERB_ALTER_RETIRE));
265     while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
266     ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) ==
267                 (drain)?QM_MCR_VERB_ALTER_RETIRE_CTXB:QM_MCR_VERB_ALTER_RETIRE);
268     res = p_Mcr->result;
269     if (res == QM_MCR_RESULT_OK)
270     {
271         /* Process 'fq' right away, we'll ignore FQRNI */
272         if (p_Mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
273             p_Fq->flags |= QMAN_FQ_STATE_NE;
274         if (p_Mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
275             p_Fq->flags |= QMAN_FQ_STATE_ORL;
276         p_Fq->state = qman_fq_state_retired;
277     }
278     else if (res == QM_MCR_RESULT_PENDING)
279         p_Fq->flags |= QMAN_FQ_STATE_CHANGING;
280     else {
281         XX_Print("ALTER_RETIRE failed: %s\n",
282                 mcr_result_str(res));
283         err = E_INVALID_STATE;
284     }
285     if (p_Flags)
286         *p_Flags = p_Fq->flags;
287 out:
288     FQUNLOCK(p_Fq);
289     PUNLOCK(p_QmPortal);
290     return err;
291 }
292 
293 static t_Error qman_oos_fq(t_QmPortal *p_QmPortal, struct qman_fq *p_Fq)
294 {
295     struct qm_mc_command    *p_Mcc;
296     struct qm_mc_result     *p_Mcr;
297     uint8_t                 res;
298 
299     ASSERT_COND(p_Fq->state == qman_fq_state_retired);
300     if (p_Fq->flags & QMAN_FQ_FLAG_NO_MODIFY)
301         return ERROR_CODE(E_INVALID_VALUE);
302     NCSW_PLOCK(p_QmPortal);
303     FQLOCK(p_Fq);
304     if ((p_Fq->flags & QMAN_FQ_STATE_BLOCKOOS) ||
305             (p_Fq->state != qman_fq_state_retired)) {
306         FQUNLOCK(p_Fq);
307         PUNLOCK(p_QmPortal);
308         return ERROR_CODE(E_BUSY);
309     }
310     p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
311     p_Mcc->alterfq.fqid = p_Fq->fqid;
312     qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_ALTER_OOS);
313     while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
314     ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
315     res = p_Mcr->result;
316     if (res != QM_MCR_RESULT_OK) {
317         FQUNLOCK(p_Fq);
318         PUNLOCK(p_QmPortal);
319         RETURN_ERROR(MINOR, E_INVALID_STATE, ("ALTER_OOS failed: %s\n", mcr_result_str(res)));
320     }
321     p_Fq->state = qman_fq_state_oos;
322 
323     FQUNLOCK(p_Fq);
324     PUNLOCK(p_QmPortal);
325     return E_OK;
326 }
327 
328 static t_Error qman_schedule_fq(t_QmPortal *p_QmPortal, struct qman_fq *p_Fq)
329 {
330     struct qm_mc_command    *p_Mcc;
331     struct qm_mc_result     *p_Mcr;
332     uint8_t                 res;
333 
334     ASSERT_COND(p_Fq->state == qman_fq_state_parked);
335     if (p_Fq->flags & QMAN_FQ_FLAG_NO_MODIFY)
336         return ERROR_CODE(E_INVALID_VALUE);
337     /* Issue a ALTERFQ_SCHED management command */
338     NCSW_PLOCK(p_QmPortal);
339     FQLOCK(p_Fq);
340     if ((p_Fq->flags & QMAN_FQ_STATE_CHANGING) ||
341             (p_Fq->state != qman_fq_state_parked)) {
342         FQUNLOCK(p_Fq);
343         PUNLOCK(p_QmPortal);
344         return ERROR_CODE(E_BUSY);
345     }
346     p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
347     p_Mcc->alterfq.fqid = p_Fq->fqid;
348     qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_ALTER_SCHED);
349     while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
350     ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
351     res = p_Mcr->result;
352     if (res != QM_MCR_RESULT_OK) {
353         FQUNLOCK(p_Fq);
354         PUNLOCK(p_QmPortal);
355         RETURN_ERROR(MINOR, E_INVALID_STATE, ("ALTER_SCHED failed: %s\n", mcr_result_str(res)));
356     }
357     p_Fq->state = qman_fq_state_sched;
358 
359     FQUNLOCK(p_Fq);
360     PUNLOCK(p_QmPortal);
361     return E_OK;
362 }
363 
/* Inline helper to reduce nesting in LoopMessageRing() */
/**
 * @brief   Apply an FQ state transition driven by a message-ring entry.
 *
 * @param p_Fq   FQ named by the message.
 * @param p_Msg  MR entry carrying the fqs status bits (used for FQRN).
 * @param verb   Message verb (FQRL / FQRN / FQPN); other verbs are ignored.
 */
static __inline__ void fq_state_change(struct qman_fq *p_Fq,
                                       struct qm_mr_entry *p_Msg,
                                       uint8_t verb)
{
    FQLOCK(p_Fq);
    switch(verb) {
        case QM_MR_VERB_FQRL:
            /* ORL drained: clear the ORL-present flag. */
            ASSERT_COND(p_Fq->flags & QMAN_FQ_STATE_ORL);
            p_Fq->flags &= ~QMAN_FQ_STATE_ORL;
            break;
        case QM_MR_VERB_FQRN:
            /* Retirement notification: the pending retire completed. */
            ASSERT_COND((p_Fq->state == qman_fq_state_parked) ||
                (p_Fq->state == qman_fq_state_sched));
            ASSERT_COND(p_Fq->flags & QMAN_FQ_STATE_CHANGING);
            p_Fq->flags &= ~QMAN_FQ_STATE_CHANGING;
            if (p_Msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
                p_Fq->flags |= QMAN_FQ_STATE_NE;
            if (p_Msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
                p_Fq->flags |= QMAN_FQ_STATE_ORL;
            p_Fq->state = qman_fq_state_retired;
            break;
        case QM_MR_VERB_FQPN:
            /* Park notification: scheduled FQ became parked. */
            ASSERT_COND(p_Fq->state == qman_fq_state_sched);
            ASSERT_COND(p_Fq->flags & QMAN_FQ_STATE_CHANGING);
            p_Fq->state = qman_fq_state_parked;
    }
    FQUNLOCK(p_Fq);
}
393 
/**
 * @brief   Record that one FQ of an FQR finished draining; when the last one
 *          completes, OOS and free all FQs and (via the completion callback
 *          path) the FQR itself.
 *
 * @param p_Fq  Drained FQ; its h_QmFqr must point at the owning t_QmFqr.
 *
 * @return  E_OK on success; E_INVALID_STATE if taking a retired FQ out of
 *          service fails.
 */
static t_Error freeDrainedFq(struct qman_fq *p_Fq)
{
    t_QmFqr     *p_QmFqr;
    uint32_t    i;

    ASSERT_COND(p_Fq);
    p_QmFqr = (t_QmFqr *)p_Fq->h_QmFqr;
    ASSERT_COND(p_QmFqr);

    /* Each FQ may complete draining only once. */
    ASSERT_COND(!p_QmFqr->p_DrainedFqs[p_Fq->fqidOffset]);
    p_QmFqr->p_DrainedFqs[p_Fq->fqidOffset] = TRUE;
    p_QmFqr->numOfDrainedFqids++;
    if (p_QmFqr->numOfDrainedFqids == p_QmFqr->numOfFqids)
    {
        /* Last FQ drained: tear down every FQ in the ring. */
        for (i=0;i<p_QmFqr->numOfFqids;i++)
        {
            if ((p_QmFqr->p_Fqs[i]->state == qman_fq_state_retired) &&
                    (qman_oos_fq(p_QmFqr->h_QmPortal, p_QmFqr->p_Fqs[i]) != E_OK))
                RETURN_ERROR(MAJOR, E_INVALID_STATE, ("qman_oos_fq() failed!"));
            qman_destroy_fq(p_QmFqr->p_Fqs[i], 0);
            XX_FreeSmart(p_QmFqr->p_Fqs[i]);
        }
        XX_Free(p_QmFqr->p_DrainedFqs);
        p_QmFqr->p_DrainedFqs = NULL;

        /* NOTE(review): when no completion callback is set, p_Fqs and the
         * FQR object itself are not freed here -- presumably the caller of
         * the drain owns them in that case; confirm against QmFqrFree(). */
        if (p_QmFqr->f_CompletionCB)
        {
            p_QmFqr->f_CompletionCB(p_QmFqr->h_App, p_QmFqr);
            XX_Free(p_QmFqr->p_Fqs);
            if (p_QmFqr->fqidBase)
                QmFqidPut(p_QmFqr->h_Qm, p_QmFqr->fqidBase);
            XX_Free(p_QmFqr);
        }
    }

    return E_OK;
}
431 
432 static t_Error drainRetiredFq(struct qman_fq *p_Fq)
433 {
434     t_QmFqr     *p_QmFqr;
435 
436     ASSERT_COND(p_Fq);
437     p_QmFqr = (t_QmFqr *)p_Fq->h_QmFqr;
438     ASSERT_COND(p_QmFqr);
439 
440     if (p_Fq->flags & QMAN_FQ_STATE_NE)
441     {
442         if (qman_volatile_dequeue(p_QmFqr->h_QmPortal, p_Fq,
443                                 (QM_VDQCR_PRECEDENCE_VDQCR | QM_VDQCR_NUMFRAMES_TILLEMPTY)) != E_OK)
444 
445             RETURN_ERROR(MAJOR, E_INVALID_STATE, ("drain with volatile failed"));
446         return E_OK;
447     }
448     else
449         return freeDrainedFq(p_Fq);
450 }
451 
/**
 * @brief   Rx-store callback used while draining: discard every frame and
 *          keep the dequeue going.
 *
 * @return  e_RX_STORE_RESPONSE_CONTINUE always.
 */
static e_RxStoreResponse drainCB(t_Handle h_App,
                                 t_Handle h_QmFqr,
                                 t_Handle h_QmPortal,
                                 uint32_t fqidOffset,
                                 t_DpaaFD *p_Frame)
{
    UNUSED(h_App);
    UNUSED(h_QmFqr);
    UNUSED(h_QmPortal);
    UNUSED(fqidOffset);
    UNUSED(p_Frame);

    /* h_QmFqr/fqidOffset are still referenced below when DBG is compiled in. */
    DBG(TRACE,("got fd for fqid %d", ((t_QmFqr *)h_QmFqr)->fqidBase + fqidOffset));
    return e_RX_STORE_RESPONSE_CONTINUE;
}
467 
468 static void cb_ern_dcErn(t_Handle                   h_App,
469                          t_Handle                   h_QmPortal,
470                          struct qman_fq             *p_Fq,
471                          const struct qm_mr_entry   *p_Msg)
472 {
473     static int cnt = 0;
474     UNUSED(p_Fq);
475     UNUSED(p_Msg);
476     UNUSED(h_App);
477     UNUSED(h_QmPortal);
478 
479     XX_Print("cb_ern_dcErn_fqs() unimplemented %d\n", ++cnt);
480 }
481 
482 static void cb_fqs(t_Handle                   h_App,
483                    t_Handle                   h_QmPortal,
484                    struct qman_fq             *p_Fq,
485                    const struct qm_mr_entry   *p_Msg)
486 {
487     UNUSED(p_Msg);
488     UNUSED(h_App);
489     UNUSED(h_QmPortal);
490 
491     if (p_Fq->state == qman_fq_state_retired &&
492         !(p_Fq->flags & QMAN_FQ_STATE_ORL))
493         drainRetiredFq(p_Fq);
494 }
495 
/**
 * @brief   Fallback MR callback for messages whose FQ tag does not resolve
 *          to a known FQ object: log and drop.
 */
static void null_cb_mr(t_Handle                   h_App,
                       t_Handle                   h_QmPortal,
                       struct qman_fq             *p_Fq,
                       const struct qm_mr_entry   *p_Msg)
{
    t_QmPortal      *p_QmPortal = (t_QmPortal *)h_QmPortal;

    UNUSED(p_Fq);UNUSED(h_App);

    /* DC-ERN entries carry the originating dc-portal id; report it. */
    if ((p_Msg->verb & QM_MR_VERB_DC_ERN) == QM_MR_VERB_DC_ERN)
        XX_Print("Ignoring unowned MR frame on cpu %d, dc-portal 0x%02x.\n",
                 p_QmPortal->p_LowQmPortal->config.cpu,p_Msg->dcern.portal);
    else
        XX_Print("Ignoring unowned MR frame on cpu %d, verb 0x%02x.\n",
                 p_QmPortal->p_LowQmPortal->config.cpu,p_Msg->verb);
}
512 
513 static uint32_t LoopMessageRing(t_QmPortal *p_QmPortal, uint32_t is)
514 {
515     struct qm_mr_entry          *p_Msg;
516 
517     if (is & QM_PIRQ_CSCI) {
518         struct qm_mc_result *p_Mcr;
519         struct qman_cgrs    tmp;
520         uint32_t            mask;
521         unsigned int        i, j;
522 
523         NCSW_PLOCK(p_QmPortal);
524         qm_mc_start(p_QmPortal->p_LowQmPortal);
525         qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYCONGESTION);
526         while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
527 
528         /* cgrs[0] is the portal mask for its cg's, cgrs[1] is the
529            previous state of cg's */
530         for (i = 0; i < QM_MAX_NUM_OF_CGS/32; i++)
531         {
532             /* get curent state */
533             tmp.q.__state[i] = p_Mcr->querycongestion.state.__state[i];
534             /* keep only cg's that are registered for this portal */
535             tmp.q.__state[i] &= p_QmPortal->cgrs[0].q.__state[i];
536             /* handle only cg's that changed their state from previous exception */
537             tmp.q.__state[i] ^= p_QmPortal->cgrs[1].q.__state[i];
538             /* update previous */
539             p_QmPortal->cgrs[1].q.__state[i] = p_Mcr->querycongestion.state.__state[i];
540         }
541         PUNLOCK(p_QmPortal);
542 
543         /* if in interrupt */
544         /* call the callback routines for any CG with a changed state */
545         for (i = 0; i < QM_MAX_NUM_OF_CGS/32; i++)
546             for(j=0, mask = 0x80000000; j<32 ; j++, mask>>=1)
547             {
548                 if(tmp.q.__state[i] & mask)
549                 {
550                     t_QmCg *p_QmCg = (t_QmCg *)(p_QmPortal->cgsHandles[i*32 + j]);
551                     if(p_QmCg->f_Exception)
552                         p_QmCg->f_Exception(p_QmCg->h_App, e_QM_EX_CG_STATE_CHANGE);
553                 }
554             }
555 
556     }
557 
558 
559     if (is & QM_PIRQ_EQRI) {
560         NCSW_PLOCK(p_QmPortal);
561         qmPortalEqcrCceUpdate(p_QmPortal->p_LowQmPortal);
562         qm_eqcr_set_ithresh(p_QmPortal->p_LowQmPortal, 0);
563         PUNLOCK(p_QmPortal);
564     }
565 
566     if (is & QM_PIRQ_MRI) {
567 mr_loop:
568         qmPortalMrPvbUpdate(p_QmPortal->p_LowQmPortal);
569         p_Msg = qm_mr_current(p_QmPortal->p_LowQmPortal);
570         if (p_Msg) {
571             struct qman_fq  *p_FqFqs  = UINT_TO_PTR(p_Msg->fq.contextB);
572             struct qman_fq  *p_FqErn  = UINT_TO_PTR(p_Msg->ern.tag);
573             uint8_t         verb    =(uint8_t)(p_Msg->verb & QM_MR_VERB_TYPE_MASK);
574             t_QmRejectedFrameInfo   rejectedFrameInfo;
575 
576             memset(&rejectedFrameInfo, 0, sizeof(t_QmRejectedFrameInfo));
577             if (!(verb & QM_MR_VERB_DC_ERN))
578             {
579                 switch(p_Msg->ern.rc)
580                 {
581                     case(QM_MR_RC_CGR_TAILDROP):
582                         rejectedFrameInfo.rejectionCode = e_QM_RC_CG_TAILDROP;
583                         rejectedFrameInfo.cg.cgId = (uint8_t)p_FqErn->cgr_groupid;
584                         break;
585                     case(QM_MR_RC_WRED):
586                         rejectedFrameInfo.rejectionCode = e_QM_RC_CG_WRED;
587                         rejectedFrameInfo.cg.cgId = (uint8_t)p_FqErn->cgr_groupid;
588                         break;
589                     case(QM_MR_RC_FQ_TAILDROP):
590                         rejectedFrameInfo.rejectionCode = e_QM_RC_FQ_TAILDROP;
591                         rejectedFrameInfo.cg.cgId = (uint8_t)p_FqErn->cgr_groupid;
592                         break;
593                     case(QM_MR_RC_ERROR):
594                         break;
595                     default:
596                         REPORT_ERROR(MINOR, E_NOT_SUPPORTED, ("Unknown rejection code"));
597                 }
598                 if (!p_FqErn)
599                     p_QmPortal->p_NullCB->ern(p_QmPortal->h_App, NULL, p_QmPortal, 0, (t_DpaaFD*)&p_Msg->ern.fd, &rejectedFrameInfo);
600                 else
601                     p_FqErn->cb.ern(p_FqErn->h_App, p_FqErn->h_QmFqr, p_QmPortal, p_FqErn->fqidOffset, (t_DpaaFD*)&p_Msg->ern.fd, &rejectedFrameInfo);
602             } else if (verb == QM_MR_VERB_DC_ERN)
603             {
604                 if (!p_FqErn)
605                     p_QmPortal->p_NullCB->dc_ern(NULL, p_QmPortal, NULL, p_Msg);
606                 else
607                     p_FqErn->cb.dc_ern(p_FqErn->h_App, p_QmPortal, p_FqErn, p_Msg);
608             } else
609             {
610                 if (verb == QM_MR_VERB_FQRNI)
611                     ; /* we drop FQRNIs on the floor */
612                 else if (!p_FqFqs)
613                             p_QmPortal->p_NullCB->fqs(NULL, p_QmPortal, NULL, p_Msg);
614                 else if ((verb == QM_MR_VERB_FQRN) ||
615                          (verb == QM_MR_VERB_FQRL) ||
616                          (verb == QM_MR_VERB_FQPN))
617                 {
618                     fq_state_change(p_FqFqs, p_Msg, verb);
619                     p_FqFqs->cb.fqs(p_FqFqs->h_App, p_QmPortal, p_FqFqs, p_Msg);
620                 }
621             }
622             qm_mr_next(p_QmPortal->p_LowQmPortal);
623             qmPortalMrCciConsume(p_QmPortal->p_LowQmPortal, 1);
624 
625             goto mr_loop;
626         }
627     }
628 
629     return is & (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI);
630 }
631 
/**
 * @brief   Consume the portal's dequeue response ring (DQRR) until it is
 *          empty or a callback asks to pause.
 *
 * Handles both the stashing-disabled path (with explicit prefetch) and the
 * DCA/non-DCA consumption modes. Unscheduled (VDQCR) entries additionally
 * drive the drain state machine: VDQCR-expired clears the VDQCR flag and
 * FQ-empty triggers freeDrainedFq().
 *
 * @param h_QmPortal  Portal handle (t_QmPortal*).
 */
static void LoopDequeueRing(t_Handle h_QmPortal)
{
    struct qm_dqrr_entry        *p_Dq;
    struct qman_fq              *p_Fq;
    enum qman_cb_dqrr_result    res = qman_cb_dqrr_consume;
    e_RxStoreResponse           tmpRes;
    t_QmPortal                  *p_QmPortal = (t_QmPortal *)h_QmPortal;
    /* Software prefetch is only needed when HW dequeue stashing is off. */
    int                         prefetch = !(p_QmPortal->options & QMAN_PORTAL_FLAG_RSTASH);

    while (res != qman_cb_dqrr_pause)
    {
        if (prefetch)
            qmPortalDqrrPvbPrefetch(p_QmPortal->p_LowQmPortal);
        qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal);
        p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal);
        if (!p_Dq)
            break;
	/* contextB carries the owning FQ object pointer (may be 0/NULL for
	 * FQs not created through this driver -- handled below). */
	p_Fq = UINT_TO_PTR(p_Dq->contextB);
        if (p_Dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
            /* We only set QMAN_FQ_STATE_NE when retiring, so we only need
             * to check for clearing it when doing volatile dequeues. It's
             * one less thing to check in the critical path (SDQCR). */
            tmpRes = p_Fq->cb.dqrr(p_Fq->h_App, p_Fq->h_QmFqr, p_QmPortal, p_Fq->fqidOffset, (t_DpaaFD*)&p_Dq->fd);
            if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
                res = qman_cb_dqrr_pause;
            /* Check for VDQCR completion */
            if (p_Dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
                p_Fq->flags &= ~QMAN_FQ_STATE_VDQCR;
            if (p_Dq->stat & QM_DQRR_STAT_FQ_EMPTY)
            {
                /* Drain finished for this FQ: release it. */
                p_Fq->flags &= ~QMAN_FQ_STATE_NE;
                freeDrainedFq(p_Fq);
            }
        }
        else
        {
            /* Interpret 'dq' from the owner's perspective. */
            /* use portal default handlers */
            ASSERT_COND(p_Dq->fqid);
            if (p_Fq)
            {
                tmpRes = p_Fq->cb.dqrr(p_Fq->h_App,
                                       p_Fq->h_QmFqr,
                                       p_QmPortal,
                                       p_Fq->fqidOffset,
                                       (t_DpaaFD*)&p_Dq->fd);
                if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
                    res = qman_cb_dqrr_pause;
                else if (p_Fq->state == qman_fq_state_waiting_parked)
                    res = qman_cb_dqrr_park;
            }
            else
            {
                /* No FQ object: fall back to the portal's null callback. */
                tmpRes = p_QmPortal->p_NullCB->dqrr(p_QmPortal->h_App,
                                                    NULL,
                                                    p_QmPortal,
                                                    p_Dq->fqid,
                                                    (t_DpaaFD*)&p_Dq->fd);
                if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
                    res = qman_cb_dqrr_pause;
            }
        }

        /* Parking isn't possible unless HELDACTIVE was set. NB,
         * FORCEELIGIBLE implies HELDACTIVE, so we only need to
         * check for HELDACTIVE to cover both. */
        ASSERT_COND((p_Dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
                    (res != qman_cb_dqrr_park));
        if (p_QmPortal->options & QMAN_PORTAL_FLAG_DCA) {
            /* Defer just means "skip it, I'll consume it myself later on" */
            if (res != qman_cb_dqrr_defer)
                qmPortalDqrrDcaConsume1ptr(p_QmPortal->p_LowQmPortal,
                                           p_Dq,
                                           (res == qman_cb_dqrr_park));
            qm_dqrr_next(p_QmPortal->p_LowQmPortal);
        } else {
            if (res == qman_cb_dqrr_park)
                /* The only thing to do for non-DCA is the park-request */
                qm_dqrr_park_ci(p_QmPortal->p_LowQmPortal);
            qm_dqrr_next(p_QmPortal->p_LowQmPortal);
            qmPortalDqrrCciConsume(p_QmPortal->p_LowQmPortal, 1);
        }
    }
}
716 
/**
 * @brief   DQRR consumer specialized for portals configured with both
 *          dequeue stashing and DCA: same logic as LoopDequeueRing() with
 *          the prefetch and non-DCA consumption branches compiled out.
 *
 * @param h_QmPortal  Portal handle (t_QmPortal*).
 */
static void LoopDequeueRingDcaOptimized(t_Handle h_QmPortal)
{
    struct qm_dqrr_entry        *p_Dq;
    struct qman_fq              *p_Fq;
    enum qman_cb_dqrr_result    res = qman_cb_dqrr_consume;
    e_RxStoreResponse           tmpRes;
    t_QmPortal                  *p_QmPortal = (t_QmPortal *)h_QmPortal;

    while (res != qman_cb_dqrr_pause)
    {
        qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal);
        p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal);
        if (!p_Dq)
            break;
	/* contextB carries the owning FQ object pointer (may be 0/NULL for
	 * FQs not created through this driver -- handled below). */
	p_Fq = UINT_TO_PTR(p_Dq->contextB);
        if (p_Dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
            /* We only set QMAN_FQ_STATE_NE when retiring, so we only need
             * to check for clearing it when doing volatile dequeues. It's
             * one less thing to check in the critical path (SDQCR). */
            tmpRes = p_Fq->cb.dqrr(p_Fq->h_App, p_Fq->h_QmFqr, p_QmPortal, p_Fq->fqidOffset, (t_DpaaFD*)&p_Dq->fd);
            if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
                res = qman_cb_dqrr_pause;
            /* Check for VDQCR completion */
            if (p_Dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
                p_Fq->flags &= ~QMAN_FQ_STATE_VDQCR;
            if (p_Dq->stat & QM_DQRR_STAT_FQ_EMPTY)
            {
                /* Drain finished for this FQ: release it. */
                p_Fq->flags &= ~QMAN_FQ_STATE_NE;
                freeDrainedFq(p_Fq);
            }
        }
        else
        {
            /* Interpret 'dq' from the owner's perspective. */
            /* use portal default handlers */
            ASSERT_COND(p_Dq->fqid);
            if (p_Fq)
            {
                tmpRes = p_Fq->cb.dqrr(p_Fq->h_App,
                                       p_Fq->h_QmFqr,
                                       p_QmPortal,
                                       p_Fq->fqidOffset,
                                       (t_DpaaFD*)&p_Dq->fd);
                if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
                    res = qman_cb_dqrr_pause;
                else if (p_Fq->state == qman_fq_state_waiting_parked)
                    res = qman_cb_dqrr_park;
            }
            else
            {
                /* No FQ object: fall back to the portal's null callback. */
                tmpRes = p_QmPortal->p_NullCB->dqrr(p_QmPortal->h_App,
                                                    NULL,
                                                    p_QmPortal,
                                                    p_Dq->fqid,
                                                    (t_DpaaFD*)&p_Dq->fd);
                if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
                    res = qman_cb_dqrr_pause;
            }
        }

        /* Parking isn't possible unless HELDACTIVE was set. NB,
         * FORCEELIGIBLE implies HELDACTIVE, so we only need to
         * check for HELDACTIVE to cover both. */
        ASSERT_COND((p_Dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
                (res != qman_cb_dqrr_park));
        /* Defer just means "skip it, I'll consume it myself later on" */
        if (res != qman_cb_dqrr_defer)
            qmPortalDqrrDcaConsume1ptr(p_QmPortal->p_LowQmPortal,
                                       p_Dq,
                                       (res == qman_cb_dqrr_park));
        qm_dqrr_next(p_QmPortal->p_LowQmPortal);
    }
}
790 
/* Service the portal's DQRR using CCI consumption (explicit consumer-index
 * writes) rather than DCA. Each dequeued frame is dispatched to its FQ's
 * dqrr callback, or to the portal's default "null" handler when no FQ
 * object is attached; the loop ends when the ring is empty or a callback
 * returns "pause". NOTE(review): near-duplicate of the DCA variant above;
 * keep the two in sync. */
static void LoopDequeueRingOptimized(t_Handle h_QmPortal)
{
    struct qm_dqrr_entry        *p_Dq;
    struct qman_fq              *p_Fq;
    enum qman_cb_dqrr_result    res = qman_cb_dqrr_consume;
    e_RxStoreResponse           tmpRes;
    t_QmPortal                  *p_QmPortal = (t_QmPortal *)h_QmPortal;

    while (res != qman_cb_dqrr_pause)
    {
        /* Refresh the cached producer index, then fetch the next entry. */
        qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal);
        p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal);
        if (!p_Dq)
            break;
        /* contextB carries the software FQ object pointer (may be 0). */
	p_Fq = UINT_TO_PTR(p_Dq->contextB);
        if (p_Dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
            /* Volatile (unscheduled) dequeue: p_Fq is assumed valid here.
             * We only set QMAN_FQ_STATE_NE when retiring, so we only need
             * to check for clearing it when doing volatile dequeues. It's
             * one less thing to check in the critical path (SDQCR). */
            tmpRes = p_Fq->cb.dqrr(p_Fq->h_App, p_Fq->h_QmFqr, p_QmPortal, p_Fq->fqidOffset, (t_DpaaFD*)&p_Dq->fd);
            if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
                res = qman_cb_dqrr_pause;
            /* Check for VDQCR completion */
            if (p_Dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
                p_Fq->flags &= ~QMAN_FQ_STATE_VDQCR;
            if (p_Dq->stat & QM_DQRR_STAT_FQ_EMPTY)
            {
                /* Queue fully drained: release the FQ object. */
                p_Fq->flags &= ~QMAN_FQ_STATE_NE;
                freeDrainedFq(p_Fq);
            }
        }
        else
        {
            /* Interpret 'dq' from the owner's perspective. */
            /* use portal default handlers */
            ASSERT_COND(p_Dq->fqid);
            if (p_Fq)
            {
                tmpRes = p_Fq->cb.dqrr(p_Fq->h_App,
                                       p_Fq->h_QmFqr,
                                       p_QmPortal,
                                       p_Fq->fqidOffset,
                                       (t_DpaaFD*)&p_Dq->fd);
                if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
                    res = qman_cb_dqrr_pause;
                else if (p_Fq->state == qman_fq_state_waiting_parked)
                    res = qman_cb_dqrr_park;
            }
            else
            {
                /* No FQ object attached: fall back to the portal-wide
                 * default callback, keyed by the raw fqid. */
                tmpRes = p_QmPortal->p_NullCB->dqrr(p_QmPortal->h_App,
                                                    NULL,
                                                    p_QmPortal,
                                                    p_Dq->fqid,
                                                    (t_DpaaFD*)&p_Dq->fd);
                if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
                    res = qman_cb_dqrr_pause;
            }
        }

        /* Parking isn't possible unless HELDACTIVE was set. NB,
         * FORCEELIGIBLE implies HELDACTIVE, so we only need to
         * check for HELDACTIVE to cover both. */
        ASSERT_COND((p_Dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
                (res != qman_cb_dqrr_park));
        if (res == qman_cb_dqrr_park)
            /* The only thing to do for non-DCA is the park-request */
            qm_dqrr_park_ci(p_QmPortal->p_LowQmPortal);
        qm_dqrr_next(p_QmPortal->p_LowQmPortal);
        /* CCI mode: explicitly consume the entry we just processed. */
        qmPortalDqrrCciConsume(p_QmPortal->p_LowQmPortal, 1);
    }
}
863 
864 /* Portal interrupt handler */
865 static void portal_isr(void *ptr)
866 {
867     t_QmPortal  *p_QmPortal = ptr;
868     uint32_t    event = 0;
869     uint32_t    enableEvents = qm_isr_enable_read(p_QmPortal->p_LowQmPortal);
870 
871     DBG(TRACE, ("software-portal %d got interrupt", p_QmPortal->p_LowQmPortal->config.cpu));
872 
873     event |= (qm_isr_status_read(p_QmPortal->p_LowQmPortal) &
874             enableEvents);
875 
876     qm_isr_status_clear(p_QmPortal->p_LowQmPortal, event);
877     /* Only do fast-path handling if it's required */
878     if (/*(event & QM_PIRQ_DQRI) &&*/
879         (p_QmPortal->options & QMAN_PORTAL_FLAG_IRQ_FAST))
880         p_QmPortal->f_LoopDequeueRingCB(p_QmPortal);
881     if (p_QmPortal->options & QMAN_PORTAL_FLAG_IRQ_SLOW)
882         LoopMessageRing(p_QmPortal, event);
883 }
884 
885 
886 static t_Error qman_query_fq_np(t_QmPortal *p_QmPortal, struct qman_fq *p_Fq, struct qm_mcr_queryfq_np *p_Np)
887 {
888     struct qm_mc_command    *p_Mcc;
889     struct qm_mc_result     *p_Mcr;
890     uint8_t                 res;
891 
892     NCSW_PLOCK(p_QmPortal);
893     p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
894     p_Mcc->queryfq_np.fqid = p_Fq->fqid;
895     qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYFQ_NP);
896     while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
897     ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
898     res = p_Mcr->result;
899     if (res == QM_MCR_RESULT_OK)
900         *p_Np = p_Mcr->queryfq_np;
901     PUNLOCK(p_QmPortal);
902     if (res != QM_MCR_RESULT_OK)
903         RETURN_ERROR(MINOR, E_INVALID_STATE, ("QUERYFQ_NP failed: %s\n", mcr_result_str(res)));
904     return E_OK;
905 }
906 
907 static uint8_t QmCgGetCgId(t_Handle h_QmCg)
908 {
909    t_QmCg *p_QmCg = (t_QmCg *)h_QmCg;
910 
911    return p_QmCg->id;
912 
913 }
914 
915 static t_Error qm_new_fq(t_QmPortal                         *p_QmPortal,
916                          uint32_t                           fqid,
917                          uint32_t                           fqidOffset,
918                          uint32_t                           channel,
919                          uint32_t                           wqid,
920                          uint16_t                           count,
921                          uint32_t                           flags,
922                          t_QmFqrCongestionAvoidanceParams   *p_CgParams,
923                          t_QmContextA                       *p_ContextA,
924                          t_QmContextB                       *p_ContextB,
925                          bool                               initParked,
926                          t_Handle                           h_QmFqr,
927                          struct qman_fq                     **p_Fqs)
928 {
929     struct qman_fq          *p_Fq = NULL;
930     struct qm_mcc_initfq    fq_opts;
931     uint32_t                i;
932     t_Error                 err = E_OK;
933     int         gap, tmp;
934     uint32_t    tmpA, tmpN, ta=0, tn=0, initFqFlag;
935 
936     ASSERT_COND(p_QmPortal);
937     ASSERT_COND(count);
938 
939     for(i=0;i<count;i++)
940     {
941         p_Fq = (struct qman_fq *)XX_MallocSmart(sizeof(struct qman_fq), 0, 64);
942         if (!p_Fq)
943             RETURN_ERROR(MAJOR, E_NO_MEMORY, ("FQ obj!!!"));
944         memset(p_Fq, 0, sizeof(struct qman_fq));
945         p_Fq->cb.dqrr     = p_QmPortal->f_DfltFrame;
946         p_Fq->cb.ern      = p_QmPortal->f_RejectedFrame;
947         p_Fq->cb.dc_ern   = cb_ern_dcErn;
948         p_Fq->cb.fqs      = cb_fqs;
949         p_Fq->h_App       = p_QmPortal->h_App;
950         p_Fq->h_QmFqr     = h_QmFqr;
951         p_Fq->fqidOffset  = fqidOffset;
952         p_Fqs[i] = p_Fq;
953         if ((err = qman_create_fq(p_QmPortal,(uint32_t)(fqid + i), 0, p_Fqs[i])) != E_OK)
954             break;
955     }
956 
957     if (err != E_OK)
958     {
959         for(i=0;i<count;i++)
960             if (p_Fqs[i])
961             {
962                 XX_FreeSmart(p_Fqs[i]);
963                 p_Fqs[i] = NULL;
964             }
965         RETURN_ERROR(MINOR, err, ("Failed to create Fqs"));
966     }
967 
968     memset(&fq_opts,0,sizeof(fq_opts));
969     fq_opts.fqid = fqid;
970     fq_opts.count = (uint16_t)(count-1);
971     fq_opts.we_mask |= QM_INITFQ_WE_DESTWQ;
972     fq_opts.fqd.dest.channel = channel;
973     fq_opts.fqd.dest.wq = wqid;
974     fq_opts.we_mask |= QM_INITFQ_WE_FQCTRL;
975     fq_opts.fqd.fq_ctrl = (uint16_t)flags;
976 
977     if ((flags & QM_FQCTRL_CGE) || (flags & QM_FQCTRL_TDE))
978         ASSERT_COND(p_CgParams);
979 
980     if(flags & QM_FQCTRL_CGE)
981     {
982         ASSERT_COND(p_CgParams->h_QmCg);
983 
984         /* CG OAC and FQ TD may not be configured at the same time. if both are required,
985            than we configure CG first, and the FQ TD later - see below. */
986         fq_opts.fqd.cgid = QmCgGetCgId(p_CgParams->h_QmCg);
987         fq_opts.we_mask |= QM_INITFQ_WE_CGID;
988         if(p_CgParams->overheadAccountingLength)
989         {
990             fq_opts.we_mask |= QM_INITFQ_WE_OAC;
991             fq_opts.we_mask &= ~QM_INITFQ_WE_TDTHRESH;
992             fq_opts.fqd.td_thresh = (uint16_t)(QM_FQD_TD_THRESH_OAC_EN | p_CgParams->overheadAccountingLength);
993         }
994     }
995     if((flags & QM_FQCTRL_TDE) && (!p_CgParams->overheadAccountingLength))
996     {
997         ASSERT_COND(p_CgParams->fqTailDropThreshold);
998 
999         fq_opts.we_mask |= QM_INITFQ_WE_TDTHRESH;
1000 
1001             /* express thresh as ta*2^tn */
1002             gap = (int)p_CgParams->fqTailDropThreshold;
1003             for (tmpA=0 ; tmpA<256; tmpA++ )
1004                 for (tmpN=0 ; tmpN<32; tmpN++ )
1005                 {
1006                     tmp = ABS((int)(p_CgParams->fqTailDropThreshold - tmpA*(1<<tmpN)));
1007                     if (tmp < gap)
1008                     {
1009                        ta = tmpA;
1010                        tn = tmpN;
1011                        gap = tmp;
1012                     }
1013                 }
1014             fq_opts.fqd.td.exp = tn;
1015             fq_opts.fqd.td.mant = ta;
1016     }
1017 
1018     if (p_ContextA)
1019     {
1020         fq_opts.we_mask |= QM_INITFQ_WE_CONTEXTA;
1021         memcpy((void*)&fq_opts.fqd.context_a, p_ContextA, sizeof(t_QmContextA));
1022     }
1023     /* If this FQ will not be used for tx, we can use contextB field */
1024     if (fq_opts.fqd.dest.channel < e_QM_FQ_CHANNEL_FMAN0_SP0)
1025     {
1026         if (sizeof(p_Fqs[0]) <= sizeof(fq_opts.fqd.context_b))
1027         {
1028             fq_opts.we_mask |= QM_INITFQ_WE_CONTEXTB;
1029             fq_opts.fqd.context_b = (uint32_t)PTR_TO_UINT(p_Fqs[0]);
1030         }
1031         else
1032             RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("64 bit pointer (virtual) not supported yet!!!"));
1033     }
1034     else if (p_ContextB) /* Tx-Queue */
1035     {
1036         fq_opts.we_mask |= QM_INITFQ_WE_CONTEXTB;
1037         memcpy((void*)&fq_opts.fqd.context_b, p_ContextB, sizeof(t_QmContextB));
1038     }
1039 
1040     if((flags & QM_FQCTRL_TDE) && (p_CgParams->overheadAccountingLength))
1041         initFqFlag = 0;
1042     else
1043         initFqFlag = (uint32_t)(initParked?0:QMAN_INITFQ_FLAG_SCHED);
1044 
1045     if ((err = qman_init_fq(p_QmPortal, p_Fqs[0], initFqFlag, &fq_opts)) != E_OK)
1046     {
1047         for(i=0;i<count;i++)
1048             if (p_Fqs[i])
1049             {
1050                 XX_FreeSmart(p_Fqs[i]);
1051                 p_Fqs[i] = NULL;
1052             }
1053         RETURN_ERROR(MINOR, err, ("Failed to init Fqs [%d-%d]", fqid, fqid+count-1));
1054     }
1055 
1056     /* if both CG OAC and FQ TD are needed, we call qman_init_fq again, this time for the FQ TD only */
1057     if((flags & QM_FQCTRL_TDE) && (p_CgParams->overheadAccountingLength))
1058     {
1059         ASSERT_COND(p_CgParams->fqTailDropThreshold);
1060 
1061         fq_opts.we_mask = QM_INITFQ_WE_TDTHRESH;
1062 
1063         /* express thresh as ta*2^tn */
1064         gap = (int)p_CgParams->fqTailDropThreshold;
1065         for (tmpA=0 ; tmpA<256; tmpA++ )
1066             for (tmpN=0 ; tmpN<32; tmpN++ )
1067             {
1068                 tmp = ABS((int)(p_CgParams->fqTailDropThreshold - tmpA*(1<<tmpN)));
1069                 if (tmp < gap)
1070                 {
1071                    ta = tmpA;
1072                    tn = tmpN;
1073                    gap = tmp;
1074                 }
1075             }
1076         fq_opts.fqd.td.exp = tn;
1077         fq_opts.fqd.td.mant = ta;
1078         if ((err = qman_init_fq(p_QmPortal, p_Fqs[0], (uint32_t)(initParked?0:QMAN_INITFQ_FLAG_SCHED), &fq_opts)) != E_OK)
1079         {
1080             for(i=0;i<count;i++)
1081                 if (p_Fqs[i])
1082                 {
1083                     XX_FreeSmart(p_Fqs[i]);
1084                     p_Fqs[i] = NULL;
1085                 }
1086             RETURN_ERROR(MINOR, err, ("Failed to init Fqs"));
1087         }
1088     }
1089 
1090 
1091     for(i=1;i<count;i++)
1092     {
1093         memcpy(p_Fqs[i], p_Fqs[0], sizeof(struct qman_fq));
1094         p_Fqs[i]->fqid += i;
1095     }
1096 
1097     return err;
1098 }
1099 
1100 
1101 static t_Error qm_free_fq(t_QmPortal *p_QmPortal, struct qman_fq *p_Fq)
1102 {
1103     uint32_t flags=0;
1104 
1105     if (qman_retire_fq(p_QmPortal, p_Fq, &flags, FALSE) != E_OK)
1106         RETURN_ERROR(MAJOR, E_INVALID_STATE, ("qman_retire_fq() failed!"));
1107 
1108     if (flags & QMAN_FQ_STATE_CHANGING)
1109         RETURN_ERROR(MAJOR, E_INVALID_STATE, ("fq %d currently in use, will be retired", p_Fq->fqid));
1110 
1111     if (flags & QMAN_FQ_STATE_NE)
1112         RETURN_ERROR(MAJOR, E_INVALID_STATE, ("qman_retire_fq() failed;" \
1113                                           "Frame Queue Not Empty, Need to dequeue"));
1114 
1115     if (qman_oos_fq(p_QmPortal, p_Fq) != E_OK)
1116         RETURN_ERROR(MAJOR, E_INVALID_STATE, ("qman_oos_fq() failed!"));
1117 
1118     qman_destroy_fq(p_Fq,0);
1119 
1120     return E_OK;
1121 }
1122 
1123 static void qman_disable_portal(t_QmPortal *p_QmPortal)
1124 {
1125     NCSW_PLOCK(p_QmPortal);
1126     if (!(p_QmPortal->disable_count++))
1127         qm_dqrr_set_maxfill(p_QmPortal->p_LowQmPortal, 0);
1128     PUNLOCK(p_QmPortal);
1129 }
1130 
1131 
1132 /* quiesce SDQCR/VDQCR, then drain till h/w wraps up anything it
1133  * was doing (5ms is more than enough to ensure it's done). */
1134 static void clean_dqrr_mr(t_QmPortal *p_QmPortal)
1135 {
1136     struct qm_dqrr_entry    *p_Dq;
1137     struct qm_mr_entry      *p_Msg;
1138     int                     idle = 0;
1139 
1140     qm_dqrr_sdqcr_set(p_QmPortal->p_LowQmPortal, 0);
1141     qm_dqrr_vdqcr_set(p_QmPortal->p_LowQmPortal, 0);
1142 drain_loop:
1143     qmPortalDqrrPvbPrefetch(p_QmPortal->p_LowQmPortal);
1144     qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal);
1145     qmPortalMrPvbUpdate(p_QmPortal->p_LowQmPortal);
1146     p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal);
1147     p_Msg = qm_mr_current(p_QmPortal->p_LowQmPortal);
1148     if (p_Dq) {
1149         qm_dqrr_next(p_QmPortal->p_LowQmPortal);
1150         qmPortalDqrrCciConsume(p_QmPortal->p_LowQmPortal, 1);
1151     }
1152     if (p_Msg) {
1153     qm_mr_next(p_QmPortal->p_LowQmPortal);
1154         qmPortalMrCciConsume(p_QmPortal->p_LowQmPortal, 1);
1155     }
1156     if (!p_Dq && !p_Msg) {
1157     if (++idle < 5) {
1158     XX_UDelay(1000);
1159     goto drain_loop;
1160     }
1161     } else {
1162     idle = 0;
1163     goto drain_loop;
1164     }
1165 }
1166 
/* Bring up all sub-interfaces of a software portal (EQCR, DQRR, MR, MC,
 * ISR), apply static interrupt-gating defaults, optionally hook the
 * portal IRQ, and verify the rings start out clean before enabling
 * dequeues. On failure, already-initialized sub-interfaces are unwound
 * via the fail_* labels in reverse order. */
static t_Error qman_create_portal(t_QmPortal *p_QmPortal,
                                   uint32_t flags,
                                   uint32_t sdqcrFlags,
                                   uint8_t  dqrrSize)
{
    const struct qm_portal_config   *p_Config = &(p_QmPortal->p_LowQmPortal->config);
    int                             ret = 0;
    t_Error                         err;
    uint32_t                        isdr;

    if ((err = qm_eqcr_init(p_QmPortal->p_LowQmPortal, e_QmPortalPVB, e_QmPortalEqcrCCE)) != E_OK)
        RETURN_ERROR(MINOR, err, ("Qman EQCR initialization failed\n"));

    /* Push mode is selected when a static dequeue command is supplied;
     * DCA vs CCI consumption and stashing come from the portal flags. */
    if (qm_dqrr_init(p_QmPortal->p_LowQmPortal,
                     sdqcrFlags ? e_QmPortalDequeuePushMode : e_QmPortalDequeuePullMode,
                     e_QmPortalPVB,
                     (flags & QMAN_PORTAL_FLAG_DCA) ? e_QmPortalDqrrDCA : e_QmPortalDqrrCCI,
                     dqrrSize,
                     (flags & QMAN_PORTAL_FLAG_RSTASH) ? 1 : 0,
                     (flags & QMAN_PORTAL_FLAG_DSTASH) ? 1 : 0)) {
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("DQRR initialization failed"));
        goto fail_dqrr;
    }

    if (qm_mr_init(p_QmPortal->p_LowQmPortal, e_QmPortalPVB, e_QmPortalMrCCI)) {
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("MR initialization failed"));
        goto fail_mr;
    }
    if (qm_mc_init(p_QmPortal->p_LowQmPortal)) {
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("MC initialization failed"));
        goto fail_mc;
    }
    if (qm_isr_init(p_QmPortal->p_LowQmPortal)) {
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("ISR initialization failed"));
        goto fail_isr;
    }
    /* static interrupt-gating controls */
    qm_dqrr_set_ithresh(p_QmPortal->p_LowQmPortal, 12);
    qm_mr_set_ithresh(p_QmPortal->p_LowQmPortal, 4);
    qm_isr_set_iperiod(p_QmPortal->p_LowQmPortal, 100);
    p_QmPortal->options = flags;
    /* Start with every interrupt source disabled (ISDR all-ones); sources
     * are re-enabled below by XOR-ing their bits out as each ring is
     * proven clean. */
    isdr = 0xffffffff;
    qm_isr_status_clear(p_QmPortal->p_LowQmPortal, 0xffffffff);
    qm_isr_enable_write(p_QmPortal->p_LowQmPortal, DEFAULT_portalExceptions);
    qm_isr_disable_write(p_QmPortal->p_LowQmPortal, isdr);
    if (flags & QMAN_PORTAL_FLAG_IRQ)
    {
        XX_SetIntr(p_Config->irq, portal_isr, p_QmPortal);
        XX_EnableIntr(p_Config->irq);
        qm_isr_uninhibit(p_QmPortal->p_LowQmPortal);
    } else
        /* without IRQ, we can't block */
        flags &= ~QMAN_PORTAL_FLAG_WAIT;
    /* Need EQCR to be empty before continuing */
    isdr ^= QM_PIRQ_EQCI;
    qm_isr_disable_write(p_QmPortal->p_LowQmPortal, isdr);
    ret = qm_eqcr_get_fill(p_QmPortal->p_LowQmPortal);
    if (ret) {
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("EQCR unclean"));
        goto fail_eqcr_empty;
    }
    /* Likewise DQRR and MR must start out empty. */
    isdr ^= (QM_PIRQ_DQRI | QM_PIRQ_MRI);
    qm_isr_disable_write(p_QmPortal->p_LowQmPortal, isdr);
    if (qm_dqrr_current(p_QmPortal->p_LowQmPortal) != NULL)
    {
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("DQRR unclean"));
goto fail_dqrr_mr_empty;
    }
    if (qm_mr_current(p_QmPortal->p_LowQmPortal) != NULL)
    {
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("MR unclean"));
goto fail_dqrr_mr_empty;
    }
    /* All clean: enable all sources and program the static dequeue cmd. */
    qm_isr_disable_write(p_QmPortal->p_LowQmPortal, 0);
    qm_dqrr_sdqcr_set(p_QmPortal->p_LowQmPortal, sdqcrFlags);
    return E_OK;
fail_dqrr_mr_empty:
fail_eqcr_empty:
    qm_isr_finish(p_QmPortal->p_LowQmPortal);
fail_isr:
    qm_mc_finish(p_QmPortal->p_LowQmPortal);
fail_mc:
    qm_mr_finish(p_QmPortal->p_LowQmPortal);
fail_mr:
    qm_dqrr_finish(p_QmPortal->p_LowQmPortal);
fail_dqrr:
    qm_eqcr_finish(p_QmPortal->p_LowQmPortal);
    return ERROR_CODE(E_INVALID_STATE);
}
1256 
1257 static void qman_destroy_portal(t_QmPortal *p_QmPortal)
1258 {
1259     /* NB we do this to "quiesce" EQCR. If we add enqueue-completions or
1260      * something related to QM_PIRQ_EQCI, this may need fixing. */
1261     qmPortalEqcrCceUpdate(p_QmPortal->p_LowQmPortal);
1262     if (p_QmPortal->options & QMAN_PORTAL_FLAG_IRQ)
1263     {
1264         XX_DisableIntr(p_QmPortal->p_LowQmPortal->config.irq);
1265         XX_FreeIntr(p_QmPortal->p_LowQmPortal->config.irq);
1266     }
1267     qm_isr_finish(p_QmPortal->p_LowQmPortal);
1268     qm_mc_finish(p_QmPortal->p_LowQmPortal);
1269     qm_mr_finish(p_QmPortal->p_LowQmPortal);
1270     qm_dqrr_finish(p_QmPortal->p_LowQmPortal);
1271     qm_eqcr_finish(p_QmPortal->p_LowQmPortal);
1272 }
1273 
1274 static inline struct qm_eqcr_entry *try_eq_start(t_QmPortal *p_QmPortal)
1275 {
1276     struct qm_eqcr_entry    *p_Eq;
1277     uint8_t                 avail;
1278 
1279     avail = qm_eqcr_get_avail(p_QmPortal->p_LowQmPortal);
1280     if (avail == EQCR_THRESH)
1281         qmPortalEqcrCcePrefetch(p_QmPortal->p_LowQmPortal);
1282     else if (avail < EQCR_THRESH)
1283             qmPortalEqcrCceUpdate(p_QmPortal->p_LowQmPortal);
1284     p_Eq = qm_eqcr_start(p_QmPortal->p_LowQmPortal);
1285 
1286     return p_Eq;
1287 }
1288 
1289 
1290 static t_Error qman_orp_update(t_QmPortal   *p_QmPortal,
1291                                uint32_t     orpId,
1292                                uint16_t     orpSeqnum,
1293                                uint32_t     flags)
1294 {
1295     struct qm_eqcr_entry *p_Eq;
1296 
1297     NCSW_PLOCK(p_QmPortal);
1298     p_Eq = try_eq_start(p_QmPortal);
1299     if (!p_Eq)
1300     {
1301         PUNLOCK(p_QmPortal);
1302         return ERROR_CODE(E_BUSY);
1303     }
1304 
1305     if (flags & QMAN_ENQUEUE_FLAG_NESN)
1306         orpSeqnum |= QM_EQCR_SEQNUM_NESN;
1307     else
1308         /* No need to check 4 QMAN_ENQUEUE_FLAG_HOLE */
1309         orpSeqnum &= ~QM_EQCR_SEQNUM_NESN;
1310     p_Eq->seqnum  = orpSeqnum;
1311     p_Eq->orp     = orpId;
1312 qmPortalEqcrPvbCommit(p_QmPortal->p_LowQmPortal, (uint8_t)QM_EQCR_VERB_ORP);
1313 
1314     PUNLOCK(p_QmPortal);
1315     return E_OK;
1316 }
1317 
1318 static __inline__ t_Error CheckStashParams(t_QmFqrParams *p_QmFqrParams)
1319 {
1320     ASSERT_COND(p_QmFqrParams);
1321 
1322     if (p_QmFqrParams->stashingParams.frameAnnotationSize > QM_CONTEXTA_MAX_STASH_SIZE)
1323         RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Frame Annotation Size Exceeded Max Stash Size(%d)", QM_CONTEXTA_MAX_STASH_SIZE));
1324     if (p_QmFqrParams->stashingParams.frameDataSize > QM_CONTEXTA_MAX_STASH_SIZE)
1325         RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Frame Data Size Exceeded Max Stash Size(%d)", QM_CONTEXTA_MAX_STASH_SIZE));
1326     if (p_QmFqrParams->stashingParams.fqContextSize > QM_CONTEXTA_MAX_STASH_SIZE)
1327         RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Frame Context Size Exceeded Max Stash Size(%d)", QM_CONTEXTA_MAX_STASH_SIZE));
1328     if (p_QmFqrParams->stashingParams.fqContextSize)
1329     {
1330         if (!p_QmFqrParams->stashingParams.fqContextAddr)
1331             RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("FQ Context Address Must be givven"));
1332         if (!IS_ALIGNED(p_QmFqrParams->stashingParams.fqContextAddr, CACHELINE_SIZE))
1333             RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("FQ Context Address Must be aligned to %d", CACHELINE_SIZE));
1334         if (p_QmFqrParams->stashingParams.fqContextAddr & 0xffffff0000000000LL)
1335             RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("FQ Context Address May be up to 40 bit"));
1336     }
1337 
1338     return E_OK;
1339 }
1340 
1341 static t_Error QmPortalRegisterCg(t_Handle h_QmPortal, t_Handle h_QmCg, uint8_t  cgId)
1342 {
1343     t_QmPortal  *p_QmPortal = (t_QmPortal *)h_QmPortal;
1344 
1345     /* cgrs[0] is the mask of registered CG's*/
1346     if(p_QmPortal->cgrs[0].q.__state[cgId/32] & (0x80000000 >> (cgId % 32)))
1347         RETURN_ERROR(MINOR, E_BUSY, ("CG already used"));
1348 
1349     p_QmPortal->cgrs[0].q.__state[cgId/32] |=  0x80000000 >> (cgId % 32);
1350     p_QmPortal->cgsHandles[cgId] = h_QmCg;
1351 
1352     return E_OK;
1353 }
1354 
1355 static t_Error QmPortalUnregisterCg(t_Handle h_QmPortal, uint8_t  cgId)
1356 {
1357     t_QmPortal  *p_QmPortal = (t_QmPortal *)h_QmPortal;
1358 
1359     /* cgrs[0] is the mask of registered CG's*/
1360     if(!(p_QmPortal->cgrs[0].q.__state[cgId/32] & (0x80000000 >> (cgId % 32))))
1361         RETURN_ERROR(MINOR, E_BUSY, ("CG is not in use"));
1362 
1363     p_QmPortal->cgrs[0].q.__state[cgId/32] &=  ~0x80000000 >> (cgId % 32);
1364     p_QmPortal->cgsHandles[cgId] = NULL;
1365 
1366     return E_OK;
1367 }
1368 
1369 static e_DpaaSwPortal QmPortalGetSwPortalId(t_Handle h_QmPortal)
1370 {
1371     t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal;
1372 
1373     return (e_DpaaSwPortal)p_QmPortal->p_LowQmPortal->config.cpu;
1374 }
1375 
1376 static t_Error CalcWredCurve(t_QmCgWredCurve *p_WredCurve, uint32_t  *p_CurveWord)
1377 {
1378     uint32_t    maxP, roundDown, roundUp, tmpA, tmpN;
1379     uint32_t    ma=0, mn=0, slope, sa=0, sn=0, pn;
1380     int         pres = 1000;
1381     int         gap, tmp;
1382 
1383 /*  TODO - change maxTh to uint64_t?
1384    if(p_WredCurve->maxTh > (1<<39))
1385         RETURN_ERROR(MINOR, E_INVALID_VALUE, ("maxTh is not in range"));*/
1386 
1387     /* express maxTh as ma*2^mn */
1388      gap = (int)p_WredCurve->maxTh;
1389      for (tmpA=0 ; tmpA<256; tmpA++ )
1390          for (tmpN=0 ; tmpN<32; tmpN++ )
1391          {
1392              tmp = ABS((int)(p_WredCurve->maxTh - tmpA*(1<<tmpN)));
1393              if (tmp < gap)
1394              {
1395                 ma = tmpA;
1396                 mn = tmpN;
1397                 gap = tmp;
1398              }
1399          }
1400      ASSERT_COND(ma <256);
1401      ASSERT_COND(mn <32);
1402      p_WredCurve->maxTh = ma*(1<<mn);
1403 
1404      if(p_WredCurve->maxTh <= p_WredCurve->minTh)
1405         RETURN_ERROR(MINOR, E_INVALID_VALUE, ("maxTh must be larger than minTh"));
1406      if(p_WredCurve->probabilityDenominator > 64)
1407         RETURN_ERROR(MINOR, E_INVALID_VALUE, ("probabilityDenominator mustn't be 1-64"));
1408 
1409     /* first we translate from Cisco probabilityDenominator
1410        to 256 fixed denominator, result must be divisible by 4. */
1411     /* we multiply by a fixed value to get better accuracy (without
1412        using floating point) */
1413     maxP = (uint32_t)(256*1000/p_WredCurve->probabilityDenominator);
1414     if (maxP % 4*pres)
1415     {
1416         roundDown  = maxP + (maxP % (4*pres));
1417         roundUp = roundDown + 4*pres;
1418         if((roundUp - maxP) > (maxP - roundDown))
1419             maxP = roundDown;
1420         else
1421             maxP = roundUp;
1422     }
1423     maxP = maxP/pres;
1424     ASSERT_COND(maxP <= 256);
1425     pn = (uint8_t)(maxP/4 - 1);
1426 
1427     if(maxP >= (p_WredCurve->maxTh - p_WredCurve->minTh))
1428         RETURN_ERROR(MINOR, E_INVALID_VALUE, ("Due to probabilityDenominator selected, maxTh-minTh must be larger than %d", maxP));
1429 
1430     pres = 1000000;
1431     slope = maxP*pres/(p_WredCurve->maxTh - p_WredCurve->minTh);
1432     /* express slope as sa/2^sn */
1433     gap = (int)slope;
1434     for (tmpA=(uint32_t)(64*pres) ; tmpA<128*pres; tmpA += pres )
1435         for (tmpN=7 ; tmpN<64; tmpN++ )
1436         {
1437             tmp = ABS((int)(slope - tmpA/(1<<tmpN)));
1438             if (tmp < gap)
1439             {
1440                sa = tmpA;
1441                sn = tmpN;
1442                gap = tmp;
1443             }
1444         }
1445     sa = sa/pres;
1446     ASSERT_COND(sa<128 && sa>=64);
1447     sn = sn;
1448     ASSERT_COND(sn<64 && sn>=7);
1449 
1450     *p_CurveWord = ((ma << 24) |
1451                     (mn << 19) |
1452                     (sa << 12) |
1453                     (sn << 6) |
1454                     pn);
1455 
1456     return E_OK;
1457 }
1458 
/* Issue a single PDQCR (pull) dequeue command and copy the resulting
 * frame descriptor into *p_Frame. Busy-waits first for the command
 * register to clear, then for a DQRR entry to appear; returns E_EMPTY
 * when the dequeue produced no valid FD.
 * NOTE(review): spins indefinitely if the hardware never produces an
 * entry — presumably guaranteed by the PDQCR contract; confirm. */
static t_Error QmPortalPullFrame(t_Handle h_QmPortal, uint32_t pdqcr, t_DpaaFD *p_Frame)
{
    t_QmPortal              *p_QmPortal = (t_QmPortal *)h_QmPortal;
    struct qm_dqrr_entry    *p_Dq;
    struct qman_fq          *p_Fq;
    int                     prefetch;
    uint32_t                *p_Dst, *p_Src;

    ASSERT_COND(p_QmPortal);
    ASSERT_COND(p_Frame);
    SANITY_CHECK_RETURN_ERROR(p_QmPortal->pullMode, E_INVALID_STATE);

    NCSW_PLOCK(p_QmPortal);

    /* Kick the pull command and wait for the portal to accept it. */
    qm_dqrr_pdqcr_set(p_QmPortal->p_LowQmPortal, pdqcr);
    CORE_MemoryBarrier();
    while (qm_dqrr_pdqcr_get(p_QmPortal->p_LowQmPortal)) ;

    /* Only prefetch when reception stashing is disabled for this portal. */
    prefetch = !(p_QmPortal->options & QMAN_PORTAL_FLAG_RSTASH);
    while(TRUE)
    {
        if (prefetch)
            qmPortalDqrrPvbPrefetch(p_QmPortal->p_LowQmPortal);
        qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal);
        p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal);
        if (!p_Dq)
            continue;
        /* contextB carries the sw FQ object; not used further here. */
        p_Fq = UINT_TO_PTR(p_Dq->contextB);
        ASSERT_COND(p_Dq->fqid);
        /* Copy the 16-byte frame descriptor word by word. */
        p_Dst = (uint32_t *)p_Frame;
        p_Src = (uint32_t *)&p_Dq->fd;
        p_Dst[0] = p_Src[0];
        p_Dst[1] = p_Src[1];
        p_Dst[2] = p_Src[2];
        p_Dst[3] = p_Src[3];
        /* Consume the entry: DCA mode vs explicit CCI consumption. */
        if (p_QmPortal->options & QMAN_PORTAL_FLAG_DCA)
        {
            qmPortalDqrrDcaConsume1ptr(p_QmPortal->p_LowQmPortal,
                                       p_Dq,
                                       FALSE);
            qm_dqrr_next(p_QmPortal->p_LowQmPortal);
        }
        else
        {
            qm_dqrr_next(p_QmPortal->p_LowQmPortal);
            qmPortalDqrrCciConsume(p_QmPortal->p_LowQmPortal, 1);
        }
        break;
    }

    PUNLOCK(p_QmPortal);

    /* NOTE(review): p_Dq->stat is read after the entry was consumed and
     * the lock dropped — relies on the ring slot not being overwritten
     * yet; verify against the portal consumption model. */
    if (!(p_Dq->stat & QM_DQRR_STAT_FD_VALID))
        return ERROR_CODE(E_EMPTY);

    return E_OK;
}
1516 
1517 
1518 /****************************************/
1519 /*       API Init unit functions        */
1520 /****************************************/
1521 t_Handle QM_PORTAL_Config(t_QmPortalParam *p_QmPortalParam)
1522 {
1523     t_QmPortal          *p_QmPortal;
1524     uint32_t            i;
1525 
1526     SANITY_CHECK_RETURN_VALUE(p_QmPortalParam, E_INVALID_HANDLE, NULL);
1527     SANITY_CHECK_RETURN_VALUE(p_QmPortalParam->swPortalId < DPAA_MAX_NUM_OF_SW_PORTALS, E_INVALID_VALUE, 0);
1528 
1529     p_QmPortal = (t_QmPortal *)XX_Malloc(sizeof(t_QmPortal));
1530     if (!p_QmPortal)
1531     {
1532         REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Qm Portal obj!!!"));
1533         return NULL;
1534     }
1535     memset(p_QmPortal, 0, sizeof(t_QmPortal));
1536 
1537     p_QmPortal->p_LowQmPortal = (struct qm_portal *)XX_Malloc(sizeof(struct qm_portal));
1538     if (!p_QmPortal->p_LowQmPortal)
1539     {
1540         XX_Free(p_QmPortal);
1541         REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Low qm p_QmPortal obj!!!"));
1542         return NULL;
1543     }
1544     memset(p_QmPortal->p_LowQmPortal, 0, sizeof(struct qm_portal));
1545 
1546     p_QmPortal->p_QmPortalDriverParams = (t_QmPortalDriverParams *)XX_Malloc(sizeof(t_QmPortalDriverParams));
1547     if (!p_QmPortal->p_QmPortalDriverParams)
1548     {
1549         XX_Free(p_QmPortal->p_LowQmPortal);
1550         XX_Free(p_QmPortal);
1551         REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Qm Portal driver parameters"));
1552         return NULL;
1553     }
1554     memset(p_QmPortal->p_QmPortalDriverParams, 0, sizeof(t_QmPortalDriverParams));
1555 
1556     p_QmPortal->p_LowQmPortal->addr.addr_ce = UINT_TO_PTR(p_QmPortalParam->ceBaseAddress);
1557     p_QmPortal->p_LowQmPortal->addr.addr_ci = UINT_TO_PTR(p_QmPortalParam->ciBaseAddress);
1558     p_QmPortal->p_LowQmPortal->config.irq = p_QmPortalParam->irq;
1559     p_QmPortal->p_LowQmPortal->config.bound = 0;
1560     p_QmPortal->p_LowQmPortal->config.cpu = (int)p_QmPortalParam->swPortalId;
1561     p_QmPortal->p_LowQmPortal->config.channel = (e_QmFQChannel)(e_QM_FQ_CHANNEL_SWPORTAL0 + p_QmPortalParam->swPortalId);
1562     p_QmPortal->p_LowQmPortal->bind_lock = XX_InitSpinlock();
1563 
1564     p_QmPortal->h_Qm                = p_QmPortalParam->h_Qm;
1565     p_QmPortal->f_DfltFrame         = p_QmPortalParam->f_DfltFrame;
1566     p_QmPortal->f_RejectedFrame     = p_QmPortalParam->f_RejectedFrame;
1567     p_QmPortal->h_App               = p_QmPortalParam->h_App;
1568 
1569     p_QmPortal->p_QmPortalDriverParams->fdLiodnOffset           = p_QmPortalParam->fdLiodnOffset;
1570     p_QmPortal->p_QmPortalDriverParams->dequeueDcaMode          = DEFAULT_dequeueDcaMode;
1571     p_QmPortal->p_QmPortalDriverParams->dequeueUpToThreeFrames  = DEFAULT_dequeueUpToThreeFrames;
1572     p_QmPortal->p_QmPortalDriverParams->commandType             = DEFAULT_dequeueCommandType;
1573     p_QmPortal->p_QmPortalDriverParams->userToken               = DEFAULT_dequeueUserToken;
1574     p_QmPortal->p_QmPortalDriverParams->specifiedWq             = DEFAULT_dequeueSpecifiedWq;
1575     p_QmPortal->p_QmPortalDriverParams->dedicatedChannel        = DEFAULT_dequeueDedicatedChannel;
1576     p_QmPortal->p_QmPortalDriverParams->dedicatedChannelHasPrecedenceOverPoolChannels =
1577         DEFAULT_dequeueDedicatedChannelHasPrecedenceOverPoolChannels;
1578     p_QmPortal->p_QmPortalDriverParams->poolChannelId           = DEFAULT_dequeuePoolChannelId;
1579     p_QmPortal->p_QmPortalDriverParams->wqId                    = DEFAULT_dequeueWqId;
1580     for (i=0;i<QM_MAX_NUM_OF_POOL_CHANNELS;i++)
1581         p_QmPortal->p_QmPortalDriverParams->poolChannels[i] = FALSE;
1582     p_QmPortal->p_QmPortalDriverParams->dqrrSize                = DEFAULT_dqrrSize;
1583     p_QmPortal->p_QmPortalDriverParams->pullMode                = DEFAULT_pullMode;
1584 
1585     return p_QmPortal;
1586 }
1587 
1588 t_Error QM_PORTAL_Init(t_Handle h_QmPortal)
1589 {
1590     t_QmPortal                          *p_QmPortal = (t_QmPortal *)h_QmPortal;
1591     uint32_t                            i, flags=0, sdqcrFlags=0;
1592     t_Error                             err;
1593     t_QmInterModulePortalInitParams     qmParams;
1594 
1595     SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE);
1596     SANITY_CHECK_RETURN_ERROR(p_QmPortal->p_QmPortalDriverParams, E_INVALID_HANDLE);
1597 
1598     memset(&qmParams, 0, sizeof(qmParams));
1599     qmParams.portalId       = (uint8_t)p_QmPortal->p_LowQmPortal->config.cpu;
1600     qmParams.liodn          = p_QmPortal->p_QmPortalDriverParams->fdLiodnOffset;
1601     qmParams.dqrrLiodn      = p_QmPortal->p_QmPortalDriverParams->dqrrLiodn;
1602     qmParams.fdFqLiodn      = p_QmPortal->p_QmPortalDriverParams->fdFqLiodn;
1603     qmParams.stashDestQueue = p_QmPortal->p_QmPortalDriverParams->stashDestQueue;
1604     if ((err = QmGetSetPortalParams(p_QmPortal->h_Qm, &qmParams)) != E_OK)
1605         RETURN_ERROR(MAJOR, err, NO_MSG);
1606 
1607     flags = (uint32_t)(((p_QmPortal->p_LowQmPortal->config.irq == NO_IRQ) ?
1608             0 :
1609             (QMAN_PORTAL_FLAG_IRQ |
1610              QMAN_PORTAL_FLAG_IRQ_FAST |
1611              QMAN_PORTAL_FLAG_IRQ_SLOW)));
1612     flags |= ((p_QmPortal->p_QmPortalDriverParams->dequeueDcaMode) ? QMAN_PORTAL_FLAG_DCA : 0);
1613     flags |= (p_QmPortal->p_QmPortalDriverParams->dqrr)?QMAN_PORTAL_FLAG_RSTASH:0;
1614     flags |= (p_QmPortal->p_QmPortalDriverParams->fdFq)?QMAN_PORTAL_FLAG_DSTASH:0;
1615 
1616     p_QmPortal->pullMode = p_QmPortal->p_QmPortalDriverParams->pullMode;
1617     if (!p_QmPortal->pullMode)
1618     {
1619         sdqcrFlags |= (p_QmPortal->p_QmPortalDriverParams->dequeueUpToThreeFrames) ? QM_SDQCR_COUNT_UPTO3 : QM_SDQCR_COUNT_EXACT1;
1620         sdqcrFlags |= QM_SDQCR_TOKEN_SET(p_QmPortal->p_QmPortalDriverParams->userToken);
1621         sdqcrFlags |= QM_SDQCR_TYPE_SET(p_QmPortal->p_QmPortalDriverParams->commandType);
1622         if (!p_QmPortal->p_QmPortalDriverParams->specifiedWq)
1623         {
1624             /* sdqcrFlags |= QM_SDQCR_SOURCE_CHANNELS;*/ /* removed as the macro is '0' */
1625             sdqcrFlags |= (p_QmPortal->p_QmPortalDriverParams->dedicatedChannelHasPrecedenceOverPoolChannels) ? QM_SDQCR_DEDICATED_PRECEDENCE : 0;
1626             sdqcrFlags |= (p_QmPortal->p_QmPortalDriverParams->dedicatedChannel) ? QM_SDQCR_CHANNELS_DEDICATED : 0;
1627             for (i=0;i<QM_MAX_NUM_OF_POOL_CHANNELS;i++)
1628                 sdqcrFlags |= ((p_QmPortal->p_QmPortalDriverParams->poolChannels[i]) ?
1629                      QM_SDQCR_CHANNELS_POOL(i+1) : 0);
1630         }
1631         else
1632         {
1633             sdqcrFlags |= QM_SDQCR_SOURCE_SPECIFICWQ;
1634             sdqcrFlags |= (p_QmPortal->p_QmPortalDriverParams->dedicatedChannel) ?
1635                             QM_SDQCR_SPECIFICWQ_DEDICATED : QM_SDQCR_SPECIFICWQ_POOL(p_QmPortal->p_QmPortalDriverParams->poolChannelId);
1636             sdqcrFlags |= QM_SDQCR_SPECIFICWQ_WQ(p_QmPortal->p_QmPortalDriverParams->wqId);
1637         }
1638     }
1639     if ((flags & QMAN_PORTAL_FLAG_RSTASH) && (flags & QMAN_PORTAL_FLAG_DCA))
1640         p_QmPortal->f_LoopDequeueRingCB = LoopDequeueRingDcaOptimized;
1641     else if ((flags & QMAN_PORTAL_FLAG_RSTASH) && !(flags & QMAN_PORTAL_FLAG_DCA))
1642         p_QmPortal->f_LoopDequeueRingCB = LoopDequeueRingOptimized;
1643     else
1644         p_QmPortal->f_LoopDequeueRingCB = LoopDequeueRing;
1645 
1646     if ((!p_QmPortal->f_RejectedFrame) || (!p_QmPortal->f_DfltFrame))
1647         RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("f_RejectedFrame or f_DfltFrame callback not provided"));
1648 
1649     p_QmPortal->p_NullCB = (struct qman_fq_cb *)XX_Malloc(sizeof(struct qman_fq_cb));
1650     if (!p_QmPortal->p_NullCB)
1651         RETURN_ERROR(MAJOR, E_NO_MEMORY, ("FQ Null CB obj!!!"));
1652     memset(p_QmPortal->p_NullCB, 0, sizeof(struct qman_fq_cb));
1653 
1654     p_QmPortal->p_NullCB->dqrr      = p_QmPortal->f_DfltFrame;
1655     p_QmPortal->p_NullCB->ern       = p_QmPortal->f_RejectedFrame;
1656     p_QmPortal->p_NullCB->dc_ern    = p_QmPortal->p_NullCB->fqs = null_cb_mr;
1657 
1658     if (qman_create_portal(p_QmPortal, flags, sdqcrFlags, p_QmPortal->p_QmPortalDriverParams->dqrrSize) != E_OK)
1659     {
1660         RETURN_ERROR(MAJOR, E_NO_MEMORY, ("create portal failed"));
1661     }
1662 
1663     QmSetPortalHandle(p_QmPortal->h_Qm, (t_Handle)p_QmPortal, (e_DpaaSwPortal)p_QmPortal->p_LowQmPortal->config.cpu);
1664     XX_Free(p_QmPortal->p_QmPortalDriverParams);
1665     p_QmPortal->p_QmPortalDriverParams = NULL;
1666 
1667     DBG(TRACE, ("Qman-Portal %d @ %p:%p",
1668                 p_QmPortal->p_LowQmPortal->config.cpu,
1669                 p_QmPortal->p_LowQmPortal->addr.addr_ce,
1670                 p_QmPortal->p_LowQmPortal->addr.addr_ci
1671                 ));
1672 
1673     DBG(TRACE, ("Qman-Portal %d phys @ 0x%016llx:0x%016llx",
1674                 p_QmPortal->p_LowQmPortal->config.cpu,
1675                 (uint64_t)XX_VirtToPhys(p_QmPortal->p_LowQmPortal->addr.addr_ce),
1676                 (uint64_t)XX_VirtToPhys(p_QmPortal->p_LowQmPortal->addr.addr_ci)
1677                 ));
1678 
1679     return E_OK;
1680 }
1681 
1682 t_Error QM_PORTAL_Free(t_Handle h_QmPortal)
1683 {
1684     t_QmPortal  *p_QmPortal = (t_QmPortal *)h_QmPortal;
1685 
1686     if (!p_QmPortal)
1687        return ERROR_CODE(E_INVALID_HANDLE);
1688 
1689     ASSERT_COND(p_QmPortal->p_LowQmPortal);
1690     QmSetPortalHandle(p_QmPortal->h_Qm, NULL, (e_DpaaSwPortal)p_QmPortal->p_LowQmPortal->config.cpu);
1691     qman_destroy_portal(p_QmPortal);
1692     if (p_QmPortal->p_NullCB)
1693         XX_Free(p_QmPortal->p_NullCB);
1694 
1695     if (p_QmPortal->p_LowQmPortal->bind_lock)
1696         XX_FreeSpinlock(p_QmPortal->p_LowQmPortal->bind_lock);
1697     if(p_QmPortal->p_QmPortalDriverParams)
1698         XX_Free(p_QmPortal->p_QmPortalDriverParams);
1699     XX_Free(p_QmPortal->p_LowQmPortal);
1700     XX_Free(p_QmPortal);
1701 
1702     return E_OK;
1703 }
1704 
1705 t_Error QM_PORTAL_ConfigDcaMode(t_Handle h_QmPortal, bool enable)
1706 {
1707     t_QmPortal  *p_QmPortal = (t_QmPortal *)h_QmPortal;
1708 
1709     SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE);
1710     SANITY_CHECK_RETURN_ERROR(p_QmPortal->p_QmPortalDriverParams, E_INVALID_HANDLE);
1711 
1712     p_QmPortal->p_QmPortalDriverParams->dequeueDcaMode = enable;
1713 
1714     return E_OK;
1715 }
1716 
1717 t_Error QM_PORTAL_ConfigStash(t_Handle h_QmPortal, t_QmPortalStashParam *p_StashParams)
1718 {
1719     t_QmPortal  *p_QmPortal = (t_QmPortal *)h_QmPortal;
1720 
1721     SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE);
1722     SANITY_CHECK_RETURN_ERROR(p_QmPortal->p_QmPortalDriverParams, E_NULL_POINTER);
1723     SANITY_CHECK_RETURN_ERROR(p_StashParams, E_NULL_POINTER);
1724 
1725     p_QmPortal->p_QmPortalDriverParams->stashDestQueue  = p_StashParams->stashDestQueue;
1726     p_QmPortal->p_QmPortalDriverParams->dqrrLiodn       = p_StashParams->dqrrLiodn;
1727     p_QmPortal->p_QmPortalDriverParams->fdFqLiodn       = p_StashParams->fdFqLiodn;
1728     p_QmPortal->p_QmPortalDriverParams->eqcr            = p_StashParams->eqcr;
1729     p_QmPortal->p_QmPortalDriverParams->eqcrHighPri     = p_StashParams->eqcrHighPri;
1730     p_QmPortal->p_QmPortalDriverParams->dqrr            = p_StashParams->dqrr;
1731     p_QmPortal->p_QmPortalDriverParams->dqrrHighPri     = p_StashParams->dqrrHighPri;
1732     p_QmPortal->p_QmPortalDriverParams->fdFq            = p_StashParams->fdFq;
1733     p_QmPortal->p_QmPortalDriverParams->fdFqHighPri     = p_StashParams->fdFqHighPri;
1734     p_QmPortal->p_QmPortalDriverParams->fdFqDrop        = p_StashParams->fdFqDrop;
1735 
1736     return E_OK;
1737 }
1738 
1739 
1740 t_Error QM_PORTAL_ConfigPullMode(t_Handle h_QmPortal, bool pullMode)
1741 {
1742     t_QmPortal  *p_QmPortal = (t_QmPortal *)h_QmPortal;
1743 
1744     SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE);
1745     SANITY_CHECK_RETURN_ERROR(p_QmPortal->p_QmPortalDriverParams, E_NULL_POINTER);
1746 
1747     p_QmPortal->p_QmPortalDriverParams->pullMode  = pullMode;
1748 
1749     return E_OK;
1750 }
1751 
1752 t_Error QM_PORTAL_AddPoolChannel(t_Handle h_QmPortal, uint8_t poolChannelId)
1753 {
1754     t_QmPortal  *p_QmPortal = (t_QmPortal *)h_QmPortal;
1755     uint32_t    sdqcrFlags;
1756 
1757     SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE);
1758     SANITY_CHECK_RETURN_ERROR((poolChannelId < QM_MAX_NUM_OF_POOL_CHANNELS), E_INVALID_VALUE);
1759 
1760     sdqcrFlags = qm_dqrr_sdqcr_get(p_QmPortal->p_LowQmPortal);
1761     sdqcrFlags |= QM_SDQCR_CHANNELS_POOL(poolChannelId+1);
1762     qm_dqrr_sdqcr_set(p_QmPortal->p_LowQmPortal, sdqcrFlags);
1763 
1764     return E_OK;
1765 }
1766 
1767 t_Error QM_PORTAL_Poll(t_Handle h_QmPortal, e_QmPortalPollSource source)
1768 {
1769     t_QmPortal  *p_QmPortal = (t_QmPortal *)h_QmPortal;
1770 
1771     SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE);
1772 
1773     NCSW_PLOCK(p_QmPortal);
1774 
1775     if ((source == e_QM_PORTAL_POLL_SOURCE_CONTROL_FRAMES) ||
1776         (source == e_QM_PORTAL_POLL_SOURCE_BOTH))
1777     {
1778         uint32_t is = qm_isr_status_read(p_QmPortal->p_LowQmPortal);
1779         uint32_t active = LoopMessageRing(p_QmPortal, is);
1780         if (active)
1781             qm_isr_status_clear(p_QmPortal->p_LowQmPortal, active);
1782     }
1783     if ((source == e_QM_PORTAL_POLL_SOURCE_DATA_FRAMES) ||
1784         (source == e_QM_PORTAL_POLL_SOURCE_BOTH))
1785         p_QmPortal->f_LoopDequeueRingCB((t_Handle)p_QmPortal);
1786 
1787     PUNLOCK(p_QmPortal);
1788 
1789     return E_OK;
1790 }
1791 
/**
 * QM_PORTAL_PollFrame - Dequeue a single frame from the portal's DQRR and
 * report it to the caller in @p_frameInfo.
 *
 * Runs under the portal lock. The DQRR prefetch/update/consume sequence
 * below follows the hardware protocol and must not be reordered.
 *
 * Return: E_OK when a frame was dequeued; E_EMPTY when the DQRR is empty.
 */
t_Error QM_PORTAL_PollFrame(t_Handle h_QmPortal, t_QmPortalFrameInfo *p_frameInfo)
{
    t_QmPortal              *p_QmPortal     = (t_QmPortal *)h_QmPortal;
    struct qm_dqrr_entry    *p_Dq;
    struct qman_fq          *p_Fq;
    int                     prefetch;

    SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR(p_frameInfo, E_NULL_POINTER);

    NCSW_PLOCK(p_QmPortal);

    /* Without DQRR stashing (RSTASH) the ring entry must be prefetched
     * explicitly before reading the PVB valid-bit state. */
    prefetch = !(p_QmPortal->options & QMAN_PORTAL_FLAG_RSTASH);
    if (prefetch)
        qmPortalDqrrPvbPrefetch(p_QmPortal->p_LowQmPortal);
    qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal);
    p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal);
    if (!p_Dq)
    {
        PUNLOCK(p_QmPortal);
        return ERROR_CODE(E_EMPTY);
    }
    /* contextB carries the owning FQ object, if one was registered;
     * NULL means the frame belongs to no driver-managed FQ. */
    p_Fq = UINT_TO_PTR(p_Dq->contextB);
    ASSERT_COND(p_Dq->fqid);
    if (p_Fq)
    {
        p_frameInfo->h_App = p_Fq->h_App;
        p_frameInfo->h_QmFqr = p_Fq->h_QmFqr;
        p_frameInfo->fqidOffset = p_Fq->fqidOffset;
        memcpy((void*)&p_frameInfo->frame, (void*)&p_Dq->fd, sizeof(t_DpaaFD));
    }
    else
    {
        /* Unowned frame: fall back to the portal-level context and report
         * the raw FQID in place of an offset. */
        p_frameInfo->h_App = p_QmPortal->h_App;
        p_frameInfo->h_QmFqr = NULL;
        p_frameInfo->fqidOffset = p_Dq->fqid;
        memcpy((void*)&p_frameInfo->frame, (void*)&p_Dq->fd, sizeof(t_DpaaFD));
    }
    /* Consume the entry: DCA mode acknowledges before advancing, CCI mode
     * advances first and then consumes by count. */
    if (p_QmPortal->options & QMAN_PORTAL_FLAG_DCA) {
        qmPortalDqrrDcaConsume1ptr(p_QmPortal->p_LowQmPortal,
                                   p_Dq,
                                   FALSE);
        qm_dqrr_next(p_QmPortal->p_LowQmPortal);
    } else {
        qm_dqrr_next(p_QmPortal->p_LowQmPortal);
        qmPortalDqrrCciConsume(p_QmPortal->p_LowQmPortal, 1);
    }

    PUNLOCK(p_QmPortal);

    return E_OK;
}
1844 
1845 
/**
 * QM_FQR_Create - Create a frame-queue range (FQR): a contiguous block of
 * FQIDs sharing a channel/work-queue and configuration.
 *
 * In shadow mode the FQR only mirrors a single, externally-owned FQ (it is
 * neither allocated from the FQID pool nor initialized in hardware).
 * Otherwise a FQID base is allocated and every FQ in the range is
 * initialized via qm_new_fq().
 *
 * @p_QmFqrParams: creation parameters (QM handle, portal, channel/WQ,
 *                 congestion-avoidance and stashing options).
 *
 * Return: handle to the new FQR, or NULL on failure (partial allocations
 *         are rolled back through QM_FQR_Free).
 */
t_Handle QM_FQR_Create(t_QmFqrParams *p_QmFqrParams)
{
    t_QmFqr             *p_QmFqr;
    uint32_t            i, flags = 0;
    u_QmFqdContextA     cnxtA;

    SANITY_CHECK_RETURN_VALUE(p_QmFqrParams, E_INVALID_HANDLE, NULL);
    SANITY_CHECK_RETURN_VALUE(p_QmFqrParams->h_Qm, E_INVALID_HANDLE, NULL);

    /* Shadow mode is only meaningful for exactly one forced FQID. */
    if (p_QmFqrParams->shadowMode &&
        (!p_QmFqrParams->useForce || p_QmFqrParams->numOfFqids != 1))
    {
        REPORT_ERROR(MAJOR, E_CONFLICT, ("shadowMode must be use with useForce and numOfFqids==1!!!"));
        return NULL;
    }

    /* 64-byte aligned allocation (cache-line aligned). */
    p_QmFqr = (t_QmFqr *)XX_MallocSmart(sizeof(t_QmFqr), 0, 64);
    if (!p_QmFqr)
    {
        REPORT_ERROR(MAJOR, E_NO_MEMORY, ("QM FQR obj!!!"));
        return NULL;
    }
    memset(p_QmFqr, 0, sizeof(t_QmFqr));

    p_QmFqr->h_Qm       = p_QmFqrParams->h_Qm;
    p_QmFqr->h_QmPortal = p_QmFqrParams->h_QmPortal;
    p_QmFqr->shadowMode = p_QmFqrParams->shadowMode;
    /* useForce with numOfFqids==0 means "exactly the one forced FQID". */
    p_QmFqr->numOfFqids = (p_QmFqrParams->useForce && !p_QmFqrParams->numOfFqids) ?
                              1 : p_QmFqrParams->numOfFqids;

    if (!p_QmFqr->h_QmPortal)
    {
        /* No portal given: use this core's default portal. */
        p_QmFqr->h_QmPortal = QmGetPortalHandle(p_QmFqr->h_Qm);
        SANITY_CHECK_RETURN_VALUE(p_QmFqr->h_QmPortal, E_INVALID_HANDLE, NULL);
    }

    p_QmFqr->p_Fqs = (struct qman_fq **)XX_Malloc(sizeof(struct qman_fq *) * p_QmFqr->numOfFqids);
    if (!p_QmFqr->p_Fqs)
    {
        REPORT_ERROR(MAJOR, E_NO_MEMORY, ("QM FQs obj!!!"));
        QM_FQR_Free(p_QmFqr);
        return NULL;
    }
    memset(p_QmFqr->p_Fqs, 0, sizeof(struct qman_fq *) * p_QmFqr->numOfFqids);

    if (p_QmFqr->shadowMode)
    {
        /* Shadow mode: build a single software FQ object around the forced
         * FQID without touching hardware; it is considered scheduled. */
        struct qman_fq          *p_Fq = NULL;

        p_QmFqr->fqidBase = p_QmFqrParams->qs.frcQ.fqid;
        p_Fq = (struct qman_fq *)XX_MallocSmart(sizeof(struct qman_fq), 0, 64);
        if (!p_Fq)
        {
            REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FQ obj!!!"));
            QM_FQR_Free(p_QmFqr);
            return NULL;
        }
        memset(p_Fq, 0, sizeof(struct qman_fq));
        p_Fq->cb.dqrr     = ((t_QmPortal*)p_QmFqr->h_QmPortal)->f_DfltFrame;
        p_Fq->cb.ern      = ((t_QmPortal*)p_QmFqr->h_QmPortal)->f_RejectedFrame;
        p_Fq->cb.dc_ern   = cb_ern_dcErn;
        p_Fq->cb.fqs      = cb_fqs;
        p_Fq->h_App       = ((t_QmPortal*)p_QmFqr->h_QmPortal)->h_App;
        p_Fq->h_QmFqr     = p_QmFqr;
        p_Fq->state       = qman_fq_state_sched;
        p_Fq->fqid        = p_QmFqr->fqidBase;
        p_QmFqr->p_Fqs[0] = p_Fq;
    }
    else
    {
        p_QmFqr->channel    = p_QmFqrParams->channel;
        p_QmFqr->workQueue  = p_QmFqrParams->wq;

        /* Allocate (or force) a contiguous block of FQIDs. */
        p_QmFqr->fqidBase = QmFqidGet(p_QmFqr->h_Qm,
                                      p_QmFqr->numOfFqids,
                                      p_QmFqrParams->qs.nonFrcQs.align,
                                      p_QmFqrParams->useForce,
                                      p_QmFqrParams->qs.frcQ.fqid);
        if (p_QmFqr->fqidBase == (uint32_t)ILLEGAL_BASE)
        {
            REPORT_ERROR(CRITICAL,E_INVALID_STATE,("can't allocate a fqid"));
            QM_FQR_Free(p_QmFqr);
            return NULL;
        }

        /* Congestion avoidance requires either a CG handle or a non-zero
         * tail-drop threshold. */
        if(p_QmFqrParams->congestionAvoidanceEnable &&
            (p_QmFqrParams->congestionAvoidanceParams.h_QmCg == NULL) &&
            (p_QmFqrParams->congestionAvoidanceParams.fqTailDropThreshold == 0))
        {
            REPORT_ERROR(CRITICAL,E_INVALID_STATE,("NULL congestion group handle and no FQ Threshold"));
            QM_FQR_Free(p_QmFqr);
            return NULL;
        }
        if(p_QmFqrParams->congestionAvoidanceEnable)
        {
            if(p_QmFqrParams->congestionAvoidanceParams.h_QmCg)
                flags |= QM_FQCTRL_CGE;
            if(p_QmFqrParams->congestionAvoidanceParams.fqTailDropThreshold)
                flags |= QM_FQCTRL_TDE;
        }

    /*
        flags |= (p_QmFqrParams->holdActive)    ? QM_FQCTRL_ORP : 0;
        flags |= (p_QmFqrParams->holdActive)    ? QM_FQCTRL_CPCSTASH : 0;
        flags |= (p_QmFqrParams->holdActive)    ? QM_FQCTRL_FORCESFDR : 0;
        flags |= (p_QmFqrParams->holdActive)    ? QM_FQCTRL_AVOIDBLOCK : 0;
    */
        flags |= (p_QmFqrParams->holdActive)    ? QM_FQCTRL_HOLDACTIVE : 0;
        flags |= (p_QmFqrParams->preferInCache) ? QM_FQCTRL_LOCKINCACHE : 0;

        if (p_QmFqrParams->useContextAForStash)
        {
            if (CheckStashParams(p_QmFqrParams) != E_OK)
            {
                REPORT_ERROR(CRITICAL,E_INVALID_STATE,NO_MSG);
                QM_FQR_Free(p_QmFqr);
                return NULL;
            }

            /* Context-A encodes stashing sizes in cache-line units plus the
             * FQ context address (40-bit, split hi/lo). */
            memset(&cnxtA, 0, sizeof(cnxtA));
            cnxtA.stashing.annotation_cl = DIV_CEIL(p_QmFqrParams->stashingParams.frameAnnotationSize, CACHELINE_SIZE);
            cnxtA.stashing.data_cl = DIV_CEIL(p_QmFqrParams->stashingParams.frameDataSize, CACHELINE_SIZE);
            cnxtA.stashing.context_cl = DIV_CEIL(p_QmFqrParams->stashingParams.fqContextSize, CACHELINE_SIZE);
            cnxtA.context_hi = (uint8_t)((p_QmFqrParams->stashingParams.fqContextAddr >> 32) & 0xff);
            cnxtA.context_lo = (uint32_t)(p_QmFqrParams->stashingParams.fqContextAddr);
            flags |= QM_FQCTRL_CTXASTASHING;
        }

        /* Initialize each FQ in the range; any failure rolls back the
         * whole FQR (QM_FQR_Free handles partially-built ranges). */
        for(i=0;i<p_QmFqr->numOfFqids;i++)
            if (qm_new_fq(p_QmFqr->h_QmPortal,
                          p_QmFqr->fqidBase+i,
                          i,
                          p_QmFqr->channel,
                          p_QmFqr->workQueue,
                          1/*p_QmFqr->numOfFqids*/,
                          flags,
                          (p_QmFqrParams->congestionAvoidanceEnable ?
                              &p_QmFqrParams->congestionAvoidanceParams : NULL),
                          p_QmFqrParams->useContextAForStash ?
                              (t_QmContextA *)&cnxtA : p_QmFqrParams->p_ContextA,
                          p_QmFqrParams->p_ContextB,
                          p_QmFqrParams->initParked,
                          p_QmFqr,
                          &p_QmFqr->p_Fqs[i]) != E_OK)
            {
                QM_FQR_Free(p_QmFqr);
                return NULL;
            }
    }
    return p_QmFqr;
}
1997 
1998 t_Error  QM_FQR_Free(t_Handle h_QmFqr)
1999 {
2000     t_QmFqr     *p_QmFqr    = (t_QmFqr *)h_QmFqr;
2001     uint32_t    i;
2002 
2003     if (!p_QmFqr)
2004         return ERROR_CODE(E_INVALID_HANDLE);
2005 
2006     if (p_QmFqr->p_Fqs)
2007     {
2008         for (i=0;i<p_QmFqr->numOfFqids;i++)
2009             if (p_QmFqr->p_Fqs[i])
2010             {
2011                 if (!p_QmFqr->shadowMode)
2012                     qm_free_fq(p_QmFqr->h_QmPortal, p_QmFqr->p_Fqs[i]);
2013                 XX_FreeSmart(p_QmFqr->p_Fqs[i]);
2014             }
2015         XX_Free(p_QmFqr->p_Fqs);
2016     }
2017 
2018     if (!p_QmFqr->shadowMode && p_QmFqr->fqidBase)
2019         QmFqidPut(p_QmFqr->h_Qm, p_QmFqr->fqidBase);
2020 
2021     XX_FreeSmart(p_QmFqr);
2022 
2023     return E_OK;
2024 }
2025 
2026 t_Error  QM_FQR_FreeWDrain(t_Handle                     h_QmFqr,
2027                            t_QmFqrDrainedCompletionCB   *f_CompletionCB,
2028                            bool                         deliverFrame,
2029                            t_QmReceivedFrameCallback    *f_CallBack,
2030                            t_Handle                     h_App)
2031 {
2032     t_QmFqr     *p_QmFqr    = (t_QmFqr *)h_QmFqr;
2033     uint32_t    i;
2034 
2035     if (!p_QmFqr)
2036         return ERROR_CODE(E_INVALID_HANDLE);
2037 
2038     if (p_QmFqr->shadowMode)
2039         RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("QM_FQR_FreeWDrain can't be called to shadow FQR!!!. call QM_FQR_Free"));
2040 
2041     p_QmFqr->p_DrainedFqs = (bool *)XX_Malloc(sizeof(bool) * p_QmFqr->numOfFqids);
2042     if (!p_QmFqr->p_DrainedFqs)
2043         RETURN_ERROR(MAJOR, E_NO_MEMORY, ("QM Drained-FQs obj!!!. Try to Free without draining"));
2044     memset(p_QmFqr->p_DrainedFqs, 0, sizeof(bool) * p_QmFqr->numOfFqids);
2045 
2046     if (f_CompletionCB)
2047     {
2048         p_QmFqr->f_CompletionCB = f_CompletionCB;
2049         p_QmFqr->h_App          = h_App;
2050     }
2051 
2052     if (deliverFrame)
2053     {
2054         if (!f_CallBack)
2055         {
2056             REPORT_ERROR(MAJOR, E_NULL_POINTER, ("f_CallBack must be given."));
2057             XX_Free(p_QmFqr->p_DrainedFqs);
2058             return ERROR_CODE(E_NULL_POINTER);
2059         }
2060         QM_FQR_RegisterCB(p_QmFqr, f_CallBack, h_App);
2061     }
2062     else
2063         QM_FQR_RegisterCB(p_QmFqr, drainCB, h_App);
2064 
2065     for (i=0;i<p_QmFqr->numOfFqids;i++)
2066     {
2067         if (qman_retire_fq(p_QmFqr->h_QmPortal, p_QmFqr->p_Fqs[i], 0, TRUE) != E_OK)
2068             RETURN_ERROR(MAJOR, E_INVALID_STATE, ("qman_retire_fq() failed!"));
2069 
2070         if (p_QmFqr->p_Fqs[i]->flags & QMAN_FQ_STATE_CHANGING)
2071             DBG(INFO, ("fq %d currently in use, will be retired", p_QmFqr->p_Fqs[i]->fqid));
2072         else
2073             drainRetiredFq(p_QmFqr->p_Fqs[i]);
2074     }
2075 
2076     if (!p_QmFqr->f_CompletionCB)
2077     {
2078         while(p_QmFqr->p_DrainedFqs) ;
2079         DBG(TRACE, ("QM-FQR with base %d completed", p_QmFqr->fqidBase));
2080         XX_FreeSmart(p_QmFqr->p_Fqs);
2081         if (p_QmFqr->fqidBase)
2082             QmFqidPut(p_QmFqr->h_Qm, p_QmFqr->fqidBase);
2083         XX_FreeSmart(p_QmFqr);
2084     }
2085 
2086     return E_OK;
2087 }
2088 
2089 t_Error QM_FQR_RegisterCB(t_Handle h_QmFqr, t_QmReceivedFrameCallback *f_CallBack, t_Handle h_App)
2090 {
2091     t_QmFqr     *p_QmFqr = (t_QmFqr *)h_QmFqr;
2092     int         i;
2093 
2094     SANITY_CHECK_RETURN_ERROR(p_QmFqr, E_INVALID_HANDLE);
2095 
2096     for (i=0;i<p_QmFqr->numOfFqids;i++)
2097     {
2098         p_QmFqr->p_Fqs[i]->cb.dqrr = f_CallBack;
2099         p_QmFqr->p_Fqs[i]->h_App   = h_App;
2100     }
2101 
2102     return E_OK;
2103 }
2104 
/**
 * QM_FQR_Enqueue - Enqueue a frame onto one FQ of the range via a portal's
 * enqueue command ring (EQCR).
 *
 * @h_QmPortal: portal to enqueue through; NULL selects this core's default
 *              portal.
 * @fqidOffset: index of the target FQ within the range.
 * @p_Frame:    the frame descriptor to enqueue (copied into the ring).
 *
 * The EQCR start/fill/commit sequence runs under the portal lock and its
 * ordering follows the hardware protocol.
 *
 * Return: E_OK on success; E_BUSY when no EQCR entry is available.
 */
t_Error QM_FQR_Enqueue(t_Handle h_QmFqr, t_Handle h_QmPortal, uint32_t fqidOffset, t_DpaaFD *p_Frame)
{
    t_QmFqr                 *p_QmFqr = (t_QmFqr *)h_QmFqr;
    t_QmPortal              *p_QmPortal;
    struct qm_eqcr_entry    *p_Eq;
    uint32_t                *p_Dst, *p_Src;
    const struct qman_fq    *p_Fq;

    SANITY_CHECK_RETURN_ERROR(p_QmFqr, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR((fqidOffset < p_QmFqr->numOfFqids), E_INVALID_VALUE);

    if (!h_QmPortal)
    {
        /* No portal given: use this core's default portal. */
        SANITY_CHECK_RETURN_ERROR(p_QmFqr->h_Qm, E_INVALID_HANDLE);
        h_QmPortal = QmGetPortalHandle(p_QmFqr->h_Qm);
        SANITY_CHECK_RETURN_ERROR(h_QmPortal, E_INVALID_HANDLE);
    }
    p_QmPortal = (t_QmPortal *)h_QmPortal;

    p_Fq = p_QmFqr->p_Fqs[fqidOffset];

#ifdef QM_CHECKING
    /* Optional debug checks: reject enqueue on no-enqueue FQs and on FQs
     * that are retired or out-of-service (unless marked no-modify). */
    if (p_Fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE)
        RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG);
    if ((!(p_Fq->flags & QMAN_FQ_FLAG_NO_MODIFY)) &&
        ((p_Fq->state == qman_fq_state_retired) ||
         (p_Fq->state == qman_fq_state_oos)))
        return ERROR_CODE(E_BUSY);
#endif /* QM_CHECKING */

    NCSW_PLOCK(p_QmPortal);
    p_Eq = try_eq_start(p_QmPortal);
    if (!p_Eq)
    {
        PUNLOCK(p_QmPortal);
        return ERROR_CODE(E_BUSY);
    }

    p_Eq->fqid = p_Fq->fqid;
    p_Eq->tag = (uintptr_t)p_Fq;
    /* gcc does a dreadful job of the following;
     *  eq->fd = *fd;
     * It causes the entire function to save/restore a wider range of
     * registers, and comes up with instruction-waste galore. This will do
     * until we can rework the function for better code-generation. */
    p_Dst = (uint32_t *)&p_Eq->fd;
    p_Src = (uint32_t *)p_Frame;
    p_Dst[0] = p_Src[0];
    p_Dst[1] = p_Src[1];
    p_Dst[2] = p_Src[2];
    p_Dst[3] = p_Src[3];

    /* Publish the entry to hardware (PVB commit with the enqueue verb). */
    qmPortalEqcrPvbCommit(p_QmPortal->p_LowQmPortal,
                          (uint8_t)(QM_EQCR_VERB_CMD_ENQUEUE/* |
                          (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT))*/));
    PUNLOCK(p_QmPortal);

    return E_OK;
}
2164 
2165 
2166 t_Error QM_FQR_PullFrame(t_Handle h_QmFqr, t_Handle h_QmPortal, uint32_t fqidOffset, t_DpaaFD *p_Frame)
2167 {
2168     t_QmFqr                 *p_QmFqr = (t_QmFqr *)h_QmFqr;
2169     uint32_t                pdqcr = 0;
2170 
2171     SANITY_CHECK_RETURN_ERROR(p_QmFqr, E_INVALID_HANDLE);
2172     SANITY_CHECK_RETURN_ERROR((fqidOffset < p_QmFqr->numOfFqids), E_INVALID_VALUE);
2173     SANITY_CHECK_RETURN_ERROR(p_Frame, E_NULL_POINTER);
2174     SANITY_CHECK_RETURN_ERROR((p_QmFqr->p_Fqs[fqidOffset]->state == qman_fq_state_oos) ||
2175                               (p_QmFqr->p_Fqs[fqidOffset]->state == qman_fq_state_parked),
2176                               E_INVALID_STATE);
2177     if (!h_QmPortal)
2178     {
2179         SANITY_CHECK_RETURN_ERROR(p_QmFqr->h_Qm, E_INVALID_HANDLE);
2180         h_QmPortal = QmGetPortalHandle(p_QmFqr->h_Qm);
2181         SANITY_CHECK_RETURN_ERROR(h_QmPortal, E_INVALID_HANDLE);
2182     }
2183 
2184     pdqcr |= QM_PDQCR_MODE_UNSCHEDULED;
2185     pdqcr |= QM_PDQCR_FQID(p_QmFqr->p_Fqs[fqidOffset]->fqid);
2186     return QmPortalPullFrame(h_QmPortal, pdqcr, p_Frame);
2187 }
2188 
2189 t_Error QM_FQR_Resume(t_Handle h_QmFqr, t_Handle h_QmPortal, uint32_t fqidOffset)
2190 {
2191     t_QmFqr     *p_QmFqr = (t_QmFqr *)h_QmFqr;
2192 
2193     SANITY_CHECK_RETURN_ERROR(p_QmFqr, E_INVALID_HANDLE);
2194     SANITY_CHECK_RETURN_ERROR((fqidOffset < p_QmFqr->numOfFqids), E_INVALID_VALUE);
2195 
2196     if (!h_QmPortal)
2197     {
2198         SANITY_CHECK_RETURN_ERROR(p_QmFqr->h_Qm, E_INVALID_HANDLE);
2199         h_QmPortal = QmGetPortalHandle(p_QmFqr->h_Qm);
2200         SANITY_CHECK_RETURN_ERROR(h_QmPortal, E_INVALID_HANDLE);
2201     }
2202     return qman_schedule_fq(h_QmPortal, p_QmFqr->p_Fqs[fqidOffset]);
2203 }
2204 
2205 t_Error  QM_FQR_Suspend(t_Handle h_QmFqr, t_Handle h_QmPortal, uint32_t fqidOffset)
2206 {
2207     t_QmFqr     *p_QmFqr = (t_QmFqr *)h_QmFqr;
2208 
2209     SANITY_CHECK_RETURN_ERROR(p_QmFqr, E_INVALID_HANDLE);
2210     SANITY_CHECK_RETURN_ERROR((fqidOffset < p_QmFqr->numOfFqids), E_INVALID_VALUE);
2211     SANITY_CHECK_RETURN_ERROR((p_QmFqr->p_Fqs[fqidOffset]->flags & QM_FQCTRL_HOLDACTIVE), E_INVALID_STATE);
2212 
2213     UNUSED(h_QmPortal);
2214     p_QmFqr->p_Fqs[fqidOffset]->state = qman_fq_state_waiting_parked;
2215 
2216     return E_OK;
2217 }
2218 
2219 uint32_t QM_FQR_GetFqid(t_Handle h_QmFqr)
2220 {
2221     t_QmFqr *p_QmFqr = (t_QmFqr *)h_QmFqr;
2222 
2223     SANITY_CHECK_RETURN_VALUE(p_QmFqr, E_INVALID_HANDLE, 0);
2224 
2225     return p_QmFqr->fqidBase;
2226 }
2227 
2228 uint32_t QM_FQR_GetCounter(t_Handle h_QmFqr, t_Handle h_QmPortal, uint32_t fqidOffset, e_QmFqrCounters counter)
2229 {
2230     t_QmFqr *p_QmFqr = (t_QmFqr *)h_QmFqr;
2231     struct qm_mcr_queryfq_np    queryfq_np;
2232 
2233     SANITY_CHECK_RETURN_VALUE(p_QmFqr, E_INVALID_HANDLE, 0);
2234     SANITY_CHECK_RETURN_VALUE((fqidOffset < p_QmFqr->numOfFqids), E_INVALID_VALUE, 0);
2235 
2236     if (!h_QmPortal)
2237     {
2238         SANITY_CHECK_RETURN_VALUE(p_QmFqr->h_Qm, E_INVALID_HANDLE, 0);
2239         h_QmPortal = QmGetPortalHandle(p_QmFqr->h_Qm);
2240         SANITY_CHECK_RETURN_VALUE(h_QmPortal, E_INVALID_HANDLE, 0);
2241     }
2242     if (qman_query_fq_np(h_QmPortal, p_QmFqr->p_Fqs[fqidOffset], &queryfq_np) != E_OK)
2243         return 0;
2244     switch (counter)
2245     {
2246         case e_QM_FQR_COUNTERS_FRAME :
2247             return queryfq_np.frm_cnt;
2248         case e_QM_FQR_COUNTERS_BYTE :
2249             return queryfq_np.byte_cnt;
2250         default :
2251             break;
2252     }
2253     /* should never get here */
2254     ASSERT_COND(FALSE);
2255 
2256     return 0;
2257 }
2258 
2259 
2260 t_Handle QM_CG_Create(t_QmCgParams *p_CgParams)
2261 {
2262     t_QmCg                          *p_QmCg;
2263     t_QmPortal                      *p_QmPortal;
2264     t_Error                         err;
2265     uint32_t                        wredParams;
2266     uint32_t                        tmpA, tmpN, ta=0, tn=0;
2267     int                             gap, tmp;
2268     struct qm_mc_command            *p_Mcc;
2269     struct qm_mc_result             *p_Mcr;
2270 
2271     SANITY_CHECK_RETURN_VALUE(p_CgParams, E_INVALID_HANDLE, NULL);
2272     SANITY_CHECK_RETURN_VALUE(p_CgParams->h_Qm, E_INVALID_HANDLE, NULL);
2273 
2274     if(p_CgParams->notifyDcPortal &&
2275        ((p_CgParams->dcPortalId == e_DPAA_DCPORTAL2) || (p_CgParams->dcPortalId == e_DPAA_DCPORTAL3)))
2276     {
2277         REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("notifyDcPortal is invalid for this DC Portal"));
2278         return NULL;
2279     }
2280 
2281     if (!p_CgParams->h_QmPortal)
2282     {
2283         p_QmPortal = QmGetPortalHandle(p_CgParams->h_Qm);
2284         SANITY_CHECK_RETURN_VALUE(p_QmPortal, E_INVALID_STATE, NULL);
2285     }
2286     else
2287         p_QmPortal = p_CgParams->h_QmPortal;
2288 
2289     p_QmCg = (t_QmCg *)XX_Malloc(sizeof(t_QmCg));
2290     if (!p_QmCg)
2291     {
2292         REPORT_ERROR(MAJOR, E_NO_MEMORY, ("QM CG obj!!!"));
2293         return NULL;
2294     }
2295     memset(p_QmCg, 0, sizeof(t_QmCg));
2296 
2297     /* build CG struct */
2298     p_QmCg->h_Qm        = p_CgParams->h_Qm;
2299     p_QmCg->h_QmPortal  = p_QmPortal;
2300     p_QmCg->h_App       = p_CgParams->h_App;
2301     err = QmGetCgId(p_CgParams->h_Qm, &p_QmCg->id);
2302     if (err)
2303     {
2304         XX_Free(p_QmCg);
2305         REPORT_ERROR(MAJOR, E_INVALID_STATE, ("QmGetCgId failed"));
2306         return NULL;
2307     }
2308 
2309     NCSW_PLOCK(p_QmPortal);
2310     p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
2311     p_Mcc->initcgr.cgid = p_QmCg->id;
2312 
2313     err = QmPortalRegisterCg(p_QmPortal, p_QmCg, p_QmCg->id);
2314     if (err)
2315     {
2316         XX_Free(p_QmCg);
2317         PUNLOCK(p_QmPortal);
2318         REPORT_ERROR(MAJOR, E_INVALID_STATE, ("QmPortalRegisterCg failed"));
2319         return NULL;
2320     }
2321 
2322     /*  Build CGR command */
2323     {
2324 #ifdef QM_CGS_NO_FRAME_MODE
2325     t_QmRevisionInfo    revInfo;
2326 
2327     QmGetRevision(p_QmCg->h_Qm, &revInfo);
2328 
2329     if (!((revInfo.majorRev == 1) && (revInfo.minorRev == 0)))
2330 #endif /* QM_CGS_NO_FRAME_MODE */
2331         if (p_CgParams->frameCount)
2332         {
2333             p_Mcc->initcgr.we_mask |= QM_CGR_WE_MODE;
2334             p_Mcc->initcgr.cgr.frame_mode = QM_CGR_EN;
2335         }
2336     }
2337 
2338     if (p_CgParams->wredEnable)
2339     {
2340         if (p_CgParams->wredParams.enableGreen)
2341         {
2342             err = CalcWredCurve(&p_CgParams->wredParams.greenCurve, &wredParams);
2343             if(err)
2344             {
2345                 XX_Free(p_QmCg);
2346                 PUNLOCK(p_QmPortal);
2347                 REPORT_ERROR(MAJOR, err, NO_MSG);
2348                 return NULL;
2349             }
2350             p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_G | QM_CGR_WE_WR_PARM_G;
2351             p_Mcc->initcgr.cgr.wr_en_g = QM_CGR_EN;
2352             p_Mcc->initcgr.cgr.wr_parm_g.word = wredParams;
2353         }
2354         if (p_CgParams->wredParams.enableYellow)
2355         {
2356             err = CalcWredCurve(&p_CgParams->wredParams.yellowCurve, &wredParams);
2357             if(err)
2358             {
2359                 XX_Free(p_QmCg);
2360                 PUNLOCK(p_QmPortal);
2361                 REPORT_ERROR(MAJOR, err, NO_MSG);
2362                 return NULL;
2363             }
2364             p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_Y | QM_CGR_WE_WR_PARM_Y;
2365             p_Mcc->initcgr.cgr.wr_en_y = QM_CGR_EN;
2366             p_Mcc->initcgr.cgr.wr_parm_y.word = wredParams;
2367         }
2368         if (p_CgParams->wredParams.enableRed)
2369         {
2370             err = CalcWredCurve(&p_CgParams->wredParams.redCurve, &wredParams);
2371             if(err)
2372             {
2373                 XX_Free(p_QmCg);
2374                 PUNLOCK(p_QmPortal);
2375                 REPORT_ERROR(MAJOR, err, NO_MSG);
2376                 return NULL;
2377             }
2378             p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_R | QM_CGR_WE_WR_PARM_R;
2379             p_Mcc->initcgr.cgr.wr_en_r = QM_CGR_EN;
2380             p_Mcc->initcgr.cgr.wr_parm_r.word = wredParams;
2381         }
2382     }
2383 
2384     if (p_CgParams->tailDropEnable)
2385     {
2386         if (!p_CgParams->threshold)
2387         {
2388             XX_Free(p_QmCg);
2389             PUNLOCK(p_QmPortal);
2390             REPORT_ERROR(MINOR, E_INVALID_STATE, ("tailDropThreshold must be configured if tailDropEnable "));
2391             return NULL;
2392         }
2393         p_Mcc->initcgr.cgr.cstd_en = QM_CGR_EN;
2394         p_Mcc->initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
2395     }
2396 
2397     if (p_CgParams->threshold)
2398     {
2399         p_Mcc->initcgr.we_mask |= QM_CGR_WE_CS_THRES;
2400         p_QmCg->f_Exception = p_CgParams->f_Exception;
2401         if (p_QmCg->f_Exception || p_CgParams->notifyDcPortal)
2402         {
2403             p_Mcc->initcgr.cgr.cscn_en = QM_CGR_EN;
2404             p_Mcc->initcgr.we_mask |= QM_CGR_WE_CSCN_EN | QM_CGR_WE_CSCN_TARG;
2405             /* if SW - set target, if HW - if FM, set HW target, otherwize, set SW target */
2406             p_Mcc->initcgr.cgr.cscn_targ = 0;
2407             if (p_QmCg->f_Exception)
2408                 p_Mcc->initcgr.cgr.cscn_targ = (uint32_t)QM_CGR_TARGET_SWP(QmPortalGetSwPortalId(p_QmCg->h_QmPortal));
2409             if (p_CgParams->notifyDcPortal)
2410                 p_Mcc->initcgr.cgr.cscn_targ |= (uint32_t)QM_CGR_TARGET_DCP(p_CgParams->dcPortalId);
2411         }
2412 
2413         /* express thresh as ta*2^tn */
2414         gap = (int)p_CgParams->threshold;
2415         for (tmpA=0 ; tmpA<256; tmpA++ )
2416             for (tmpN=0 ; tmpN<32; tmpN++ )
2417             {
2418                 tmp = ABS((int)(p_CgParams->threshold - tmpA*(1<<tmpN)));
2419                 if (tmp < gap)
2420                 {
2421                    ta = tmpA;
2422                    tn = tmpN;
2423                    gap = tmp;
2424                 }
2425             }
2426         p_Mcc->initcgr.cgr.cs_thres.TA = ta;
2427         p_Mcc->initcgr.cgr.cs_thres.Tn = tn;
2428     }
2429     else if(p_CgParams->f_Exception)
2430     {
2431         XX_Free(p_QmCg);
2432         PUNLOCK(p_QmPortal);
2433         REPORT_ERROR(MINOR, E_INVALID_STATE, ("No threshold configured, but f_Exception defined"));
2434         return NULL;
2435     }
2436 
2437     qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_INITCGR);
2438     while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
2439     ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_INITCGR);
2440     if (p_Mcr->result != QM_MCR_RESULT_OK)
2441     {
2442         XX_Free(p_QmCg);
2443         PUNLOCK(p_QmPortal);
2444         REPORT_ERROR(MINOR, E_INVALID_STATE, ("INITCGR failed: %s", mcr_result_str(p_Mcr->result)));
2445         return NULL;
2446     }
2447     PUNLOCK(p_QmPortal);
2448 
2449     return p_QmCg;
2450 }
2451 
2452 t_Error QM_CG_Free(t_Handle h_QmCg)
2453 {
2454 
2455     t_QmCg                  *p_QmCg = (t_QmCg *)h_QmCg;
2456     t_Error                 err;
2457     struct qm_mc_command    *p_Mcc;
2458     struct qm_mc_result     *p_Mcr;
2459     t_QmPortal              *p_QmPortal;
2460 
2461     SANITY_CHECK_RETURN_ERROR(p_QmCg, E_INVALID_HANDLE);
2462 
2463     p_QmPortal = (t_QmPortal *)p_QmCg->h_QmPortal;
2464 
2465     NCSW_PLOCK(p_QmPortal);
2466     p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
2467     p_Mcc->initcgr.cgid = p_QmCg->id;
2468     p_Mcc->initcgr.we_mask = QM_CGR_WE_MASK;
2469 
2470     err = QmFreeCgId(p_QmCg->h_Qm, p_QmCg->id);
2471     if(err)
2472     {
2473         XX_Free(p_QmCg);
2474         PUNLOCK(p_QmPortal);
2475         RETURN_ERROR(MAJOR, E_INVALID_STATE, ("QmFreeCgId failed"));
2476     }
2477 
2478     err = QmPortalUnregisterCg(p_QmCg->h_QmPortal, p_QmCg->id);
2479     if(err)
2480     {
2481         XX_Free(p_QmCg);
2482         PUNLOCK(p_QmPortal);
2483         RETURN_ERROR(MAJOR, E_INVALID_STATE, ("QmPortalUnregisterCg failed"));
2484     }
2485 
2486     qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_MODIFYCGR);
2487     while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
2488     ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_MODIFYCGR);
2489     if (p_Mcr->result != QM_MCR_RESULT_OK)
2490     {
2491         PUNLOCK(p_QmPortal);
2492         RETURN_ERROR(MINOR, E_INVALID_STATE, ("INITCGR failed: %s", mcr_result_str(p_Mcr->result)));
2493     }
2494     PUNLOCK(p_QmPortal);
2495 
2496     XX_Free(p_QmCg);
2497 
2498     return E_OK;
2499 }
2500 
2501 t_Error QM_CG_SetException(t_Handle h_QmCg, e_QmExceptions exception, bool enable)
2502 {
2503     t_QmCg                  *p_QmCg = (t_QmCg *)h_QmCg;
2504     struct qm_mc_command    *p_Mcc;
2505     struct qm_mc_result     *p_Mcr;
2506     t_QmPortal              *p_QmPortal;
2507 
2508     SANITY_CHECK_RETURN_ERROR(p_QmCg, E_INVALID_HANDLE);
2509 
2510     p_QmPortal = (t_QmPortal *)p_QmCg->h_QmPortal;
2511     if (!p_QmCg->f_Exception)
2512         RETURN_ERROR(MINOR, E_INVALID_VALUE, ("Either threshold or exception callback was not configured."));
2513 
2514     NCSW_PLOCK(p_QmPortal);
2515     p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
2516     p_Mcc->initcgr.cgid = p_QmCg->id;
2517     p_Mcc->initcgr.we_mask = QM_CGR_WE_CSCN_EN;
2518 
2519     if(exception == e_QM_EX_CG_STATE_CHANGE)
2520     {
2521         if(enable)
2522             p_Mcc->initcgr.cgr.cscn_en = QM_CGR_EN;
2523     }
2524     else
2525     {
2526         PUNLOCK(p_QmPortal);
2527         RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal exception"));
2528     }
2529 
2530     qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_MODIFYCGR);
2531     while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
2532     ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_MODIFYCGR);
2533     if (p_Mcr->result != QM_MCR_RESULT_OK)
2534     {
2535         PUNLOCK(p_QmPortal);
2536         RETURN_ERROR(MINOR, E_INVALID_STATE, ("INITCGR failed: %s", mcr_result_str(p_Mcr->result)));
2537     }
2538     PUNLOCK(p_QmPortal);
2539 
2540     return E_OK;
2541 }
2542 
2543 t_Error QM_CG_ModifyWredCurve(t_Handle h_QmCg, t_QmCgModifyWredParams *p_QmCgModifyParams)
2544 {
2545     t_QmCg                  *p_QmCg = (t_QmCg *)h_QmCg;
2546     uint32_t                wredParams;
2547     struct qm_mc_command    *p_Mcc;
2548     struct qm_mc_result     *p_Mcr;
2549     t_QmPortal              *p_QmPortal;
2550     t_Error                 err = E_OK;
2551 
2552     SANITY_CHECK_RETURN_ERROR(p_QmCg, E_INVALID_HANDLE);
2553 
2554     p_QmPortal = (t_QmPortal *)p_QmCg->h_QmPortal;
2555 
2556     NCSW_PLOCK(p_QmPortal);
2557     p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
2558     p_Mcc->initcgr.cgid = p_QmCg->id;
2559 
2560     qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYCGR);
2561     while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
2562     ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
2563     if (p_Mcr->result != QM_MCR_RESULT_OK)
2564     {
2565         PUNLOCK(p_QmPortal);
2566         RETURN_ERROR(MINOR, E_INVALID_STATE, ("QM_MCC_VERB_QUERYCGR failed: %s", mcr_result_str(p_Mcr->result)));
2567     }
2568 
2569     switch(p_QmCgModifyParams->color)
2570     {
2571         case(e_QM_CG_COLOR_GREEN):
2572             if(!p_Mcr->querycgr.cgr.wr_en_g)
2573             {
2574                 PUNLOCK(p_QmPortal);
2575                 RETURN_ERROR(MINOR, E_INVALID_STATE, ("WRED is not enabled for green"));
2576             }
2577             break;
2578         case(e_QM_CG_COLOR_YELLOW):
2579             if(!p_Mcr->querycgr.cgr.wr_en_y)
2580             {
2581                 PUNLOCK(p_QmPortal);
2582                 RETURN_ERROR(MINOR, E_INVALID_STATE, ("WRED is not enabled for yellow"));
2583             }
2584             break;
2585         case(e_QM_CG_COLOR_RED):
2586             if(!p_Mcr->querycgr.cgr.wr_en_r)
2587             {
2588                 PUNLOCK(p_QmPortal);
2589                 RETURN_ERROR(MINOR, E_INVALID_STATE, ("WRED is not enabled for red"));
2590             }
2591             break;
2592     }
2593 
2594     p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
2595     p_Mcc->initcgr.cgid = p_QmCg->id;
2596 
2597     switch(p_QmCgModifyParams->color)
2598     {
2599         case(e_QM_CG_COLOR_GREEN):
2600             err = CalcWredCurve(&p_QmCgModifyParams->wredParams, &wredParams);
2601             p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_G | QM_CGR_WE_WR_PARM_G;
2602             p_Mcc->initcgr.cgr.wr_en_g = QM_CGR_EN;
2603             p_Mcc->initcgr.cgr.wr_parm_g.word = wredParams;
2604             break;
2605         case(e_QM_CG_COLOR_YELLOW):
2606             err = CalcWredCurve(&p_QmCgModifyParams->wredParams, &wredParams);
2607             p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_Y | QM_CGR_WE_WR_PARM_Y;
2608             p_Mcc->initcgr.cgr.wr_en_y = QM_CGR_EN;
2609             p_Mcc->initcgr.cgr.wr_parm_y.word = wredParams;
2610             break;
2611         case(e_QM_CG_COLOR_RED):
2612             err = CalcWredCurve(&p_QmCgModifyParams->wredParams, &wredParams);
2613             p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_R | QM_CGR_WE_WR_PARM_R;
2614             p_Mcc->initcgr.cgr.wr_en_r = QM_CGR_EN;
2615             p_Mcc->initcgr.cgr.wr_parm_r.word = wredParams;
2616             break;
2617     }
2618     if (err)
2619     {
2620         PUNLOCK(p_QmPortal);
2621         RETURN_ERROR(MINOR, err, NO_MSG);
2622     }
2623 
2624     qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_MODIFYCGR);
2625     while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
2626     ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_MODIFYCGR);
2627     if (p_Mcr->result != QM_MCR_RESULT_OK)
2628     {
2629         PUNLOCK(p_QmPortal);
2630         RETURN_ERROR(MINOR, E_INVALID_STATE, ("INITCGR failed: %s", mcr_result_str(p_Mcr->result)));
2631     }
2632     PUNLOCK(p_QmPortal);
2633 
2634     return E_OK;
2635 }
2636 
2637 t_Error QM_CG_ModifyTailDropThreshold(t_Handle h_QmCg, uint32_t threshold)
2638 {
2639     t_QmCg                  *p_QmCg = (t_QmCg *)h_QmCg;
2640     struct qm_mc_command    *p_Mcc;
2641     struct qm_mc_result     *p_Mcr;
2642     t_QmPortal              *p_QmPortal;
2643     uint32_t                tmpA, tmpN, ta=0, tn=0;
2644     int                     gap, tmp;
2645 
2646     SANITY_CHECK_RETURN_ERROR(p_QmCg, E_INVALID_HANDLE);
2647 
2648     p_QmPortal = (t_QmPortal *)p_QmCg->h_QmPortal;
2649 
2650     NCSW_PLOCK(p_QmPortal);
2651     p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
2652     p_Mcc->initcgr.cgid = p_QmCg->id;
2653 
2654     qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYCGR);
2655     while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
2656     ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
2657     if (p_Mcr->result != QM_MCR_RESULT_OK)
2658     {
2659         PUNLOCK(p_QmPortal);
2660         RETURN_ERROR(MINOR, E_INVALID_STATE, ("QM_MCC_VERB_QUERYCGR failed: %s", mcr_result_str(p_Mcr->result)));
2661     }
2662 
2663     if(!p_Mcr->querycgr.cgr.cstd_en)
2664     {
2665         PUNLOCK(p_QmPortal);
2666         RETURN_ERROR(MINOR, E_INVALID_STATE, ("Tail Drop is not enabled!"));
2667     }
2668 
2669     p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
2670     p_Mcc->initcgr.cgid = p_QmCg->id;
2671     p_Mcc->initcgr.we_mask |= QM_CGR_WE_CS_THRES;
2672 
2673     /* express thresh as ta*2^tn */
2674     gap = (int)threshold;
2675     for (tmpA=0 ; tmpA<256; tmpA++ )
2676         for (tmpN=0 ; tmpN<32; tmpN++ )
2677         {
2678             tmp = ABS((int)(threshold - tmpA*(1<<tmpN)));
2679             if (tmp < gap)
2680             {
2681                ta = tmpA;
2682                tn = tmpN;
2683                gap = tmp;
2684             }
2685         }
2686     p_Mcc->initcgr.cgr.cs_thres.TA = ta;
2687     p_Mcc->initcgr.cgr.cs_thres.Tn = tn;
2688 
2689     qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_MODIFYCGR);
2690     while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
2691     ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_MODIFYCGR);
2692     if (p_Mcr->result != QM_MCR_RESULT_OK)
2693     {
2694         PUNLOCK(p_QmPortal);
2695         RETURN_ERROR(MINOR, E_INVALID_STATE, ("INITCGR failed: %s", mcr_result_str(p_Mcr->result)));
2696     }
2697     PUNLOCK(p_QmPortal);
2698 
2699     return E_OK;
2700 }
2701 
2702