/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright (C) 2014 Freescale Semiconductor
 */

#ifndef _FSL_QBMAN_PORTAL_H
#define _FSL_QBMAN_PORTAL_H

#include <fsl-mc/fsl_qbman_base.h>

/* Create and destroy a functional object representing the given QBMan portal
 * descriptor. */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *);

	/************/
	/* Dequeues */
	/************/

/* See the QBMan driver API documentation for details on the dequeue
 * mechanisms. NB: the use of a 'ldpaa_' prefix for this type is because it is
 * primarily used by the "DPIO" layer that sits above (and hides) the QBMan
 * driver. The structure is defined in the DPIO interface, but to avoid circular
 * dependencies we just pre/re-declare it here opaquely. */
struct ldpaa_dq;

/* ------------------- */
/* Pull-mode dequeuing */
/* ------------------- */

struct qbman_pull_desc {
	uint32_t dont_manipulate_directly[6];
};

/* Clear the contents of a descriptor to default/starting state. */
void qbman_pull_desc_clear(struct qbman_pull_desc *);
/* If not called, or if called with 'storage' as NULL, the resulting pull
 * dequeues will produce results to DQRR. If 'storage' is non-NULL, then results
 * are produced to the given memory location (using the physical/DMA address
 * which the caller provides in 'storage_phys'), and 'stash' controls whether or
 * not those writes to main-memory express a cache-warming attribute. */
void qbman_pull_desc_set_storage(struct qbman_pull_desc *,
				 struct ldpaa_dq *storage,
				 dma_addr_t storage_phys,
				 int stash);
/* numframes must be between 1 and 16, inclusive */
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *, uint8_t numframes);
/* token is the value that shows up in the dequeue results that can be used to
 * detect when the results have been published, and is not really used when
 * dequeue results go to DQRR. The easiest technique is to zero result "storage"
 * before issuing a pull dequeue, and use any non-zero 'token' value. */
void qbman_pull_desc_set_token(struct qbman_pull_desc *, uint8_t token);
/* Exactly one of the following descriptor "actions" should be set. (Calling any
 * one of these will replace the effect of any prior call to one of these.)
 * - pull dequeue from the given frame queue (FQ)
 * - pull dequeue from any FQ in the given work queue (WQ)
 * - pull dequeue from any FQ in any WQ in the given channel
 */
void qbman_pull_desc_set_fq(struct qbman_pull_desc *, uint32_t fqid);

/* Issue the pull dequeue command */
int qbman_swp_pull(struct qbman_swp *, struct qbman_pull_desc *);
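
/* Example (an illustrative sketch only, not part of the API): build and issue
 * a pull dequeue of up to 4 frames from a frame queue, with the results
 * delivered to DQRR. 'swp' and 'fqid' are assumed to have been obtained
 * elsewhere (e.g. 'swp' from qbman_swp_init()).
 *
 *	struct qbman_pull_desc pulldesc;
 *
 *	qbman_pull_desc_clear(&pulldesc);
 *	qbman_pull_desc_set_numframes(&pulldesc, 4);
 *	qbman_pull_desc_set_fq(&pulldesc, fqid);
 *	err = qbman_swp_pull(swp, &pulldesc);
 *
 * A non-zero return from qbman_swp_pull() is assumed to mean the command could
 * not be issued (e.g. the portal is busy) and may be retried.
 */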

/* -------------------------------- */
/* Polling DQRR for dequeue results */
/* -------------------------------- */

/* Returns NULL if there are no unconsumed DQRR entries. Returns a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring that they be consumed immediately or in any particular order. */
const struct ldpaa_dq *qbman_swp_dqrr_next(struct qbman_swp *);
/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
void qbman_swp_dqrr_consume(struct qbman_swp *, const struct ldpaa_dq *);
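
/* Example (an illustrative sketch only): drain whatever is currently available
 * in DQRR. 'swp' is assumed valid and process_result() is a hypothetical,
 * caller-supplied handler; qbman_dq_entry_is_DQ() (declared below) filters out
 * non-dequeue notifications.
 *
 *	const struct ldpaa_dq *dq;
 *
 *	while ((dq = qbman_swp_dqrr_next(swp)) != NULL) {
 *		if (qbman_dq_entry_is_DQ(dq))
 *			process_result(dq);
 *		qbman_swp_dqrr_consume(swp, dq);
 *	}
 */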

/* ------------------------------------------------- */
/* Polling user-provided storage for dequeue results */
/* ------------------------------------------------- */

/* Only used for user-provided storage of dequeue results, not DQRR. Prior to
 * being used, "oldtoken" must be set in the storage, so that the driver
 * notices when hardware has filled it in with results using a "newtoken". NB,
 * for efficiency purposes, the driver will perform any required endianness
 * conversion to ensure that the user's dequeue result storage is in
 * host-endian format (whether or not that is the same as the little-endian
 * format that hardware DMA'd to the user's storage). As such, once the user
 * has called qbman_dq_entry_has_newtoken() and been returned a valid dequeue
 * result, they should not call it again on the same memory location (except
 * of course if another dequeue command has been executed to produce a new
 * result to that location).
 */
void qbman_dq_entry_set_oldtoken(struct ldpaa_dq *,
				 unsigned int num_entries,
				 uint8_t oldtoken);
int qbman_dq_entry_has_newtoken(struct qbman_swp *,
				const struct ldpaa_dq *,
				uint8_t newtoken);
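
/* Example (an illustrative sketch only): poll user-provided storage for the
 * result of a single-frame pull dequeue. 'swp', 'storage' and 'storage_phys'
 * are assumed to have been set up by the caller, and the pull descriptor's
 * token is assumed to have been set to the (arbitrary, non-zero) value 0xab
 * via qbman_pull_desc_set_token().
 *
 *	qbman_dq_entry_set_oldtoken(storage, 1, 0);
 *	... build the pull descriptor, point it at 'storage'/'storage_phys'
 *	    with qbman_pull_desc_set_storage(), and issue it with
 *	    qbman_swp_pull() ...
 *	while (!qbman_dq_entry_has_newtoken(swp, storage, 0xab))
 *		;
 *	... 'storage' now holds a host-endian dequeue result; do not poll it
 *	    again unless another pull dequeue is issued to the same location ...
 */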

/* -------------------------------------------------------- */
/* Parsing dequeue entries (DQRR and user-provided storage) */
/* -------------------------------------------------------- */

/* DQRR entries may contain non-dequeue results, i.e. notifications */
int qbman_dq_entry_is_DQ(const struct ldpaa_dq *);

	/************/
	/* Enqueues */
	/************/

struct qbman_eq_desc {
	uint32_t dont_manipulate_directly[8];
};

/* Clear the contents of a descriptor to default/starting state. */
void qbman_eq_desc_clear(struct qbman_eq_desc *);
/* Exactly one of the following descriptor "actions" should be set. (Calling
 * any one of these will replace the effect of any prior call to one of these.)
 * - enqueue without order-restoration
 * - enqueue with order-restoration
 * - fill a hole in the order-restoration sequence, without any enqueue
 * - advance NESN (Next Expected Sequence Number), without any enqueue
 * 'respond_success' indicates whether an enqueue response should be DMA'd
 * after success (otherwise a response is DMA'd only after failure).
 * 'incomplete' indicates that other fragments of the same 'seqnum' are yet to
 * be enqueued.
 */
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *, int respond_success);
/* Set the physical address of the "storage" to which any enqueue response will
 * be written; 'stash' controls whether those writes express a cache-warming
 * attribute. */
void qbman_eq_desc_set_response(struct qbman_eq_desc *,
				dma_addr_t storage_phys,
				int stash);
/* token is the value that shows up in an enqueue response that can be used to
 * detect when the results have been published. The easiest technique is to zero
 * result "storage" before issuing an enqueue, and use any non-zero 'token'
 * value. */
void qbman_eq_desc_set_token(struct qbman_eq_desc *, uint8_t token);
/* Exactly one of the following descriptor "targets" should be set. (Calling any
 * one of these will replace the effect of any prior call to one of these.)
 * - enqueue to a frame queue
 * - enqueue to a queuing destination
 * Note that none of these will have any effect if the "action" type has been
 * set to "orp_hole" or "orp_nesn".
 */
void qbman_eq_desc_set_fq(struct qbman_eq_desc *, uint32_t fqid);
void qbman_eq_desc_set_qd(struct qbman_eq_desc *, uint32_t qdid,
			  uint32_t qd_bin, uint32_t qd_prio);

/* Issue an enqueue command. ('fd' should only be NULL if the "action" of the
 * descriptor is "orp_hole" or "orp_nesn".) */
int qbman_swp_enqueue(struct qbman_swp *, const struct qbman_eq_desc *,
		      const struct qbman_fd *fd);
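
/* Example (an illustrative sketch only): enqueue one frame descriptor to a
 * frame queue with no order restoration and no response DMA'd on success.
 * 'swp', 'fqid' and 'fd' are assumed to be provided by the caller.
 *
 *	struct qbman_eq_desc eqdesc;
 *
 *	qbman_eq_desc_clear(&eqdesc);
 *	qbman_eq_desc_set_no_orp(&eqdesc, 0);
 *	qbman_eq_desc_set_fq(&eqdesc, fqid);
 *	err = qbman_swp_enqueue(swp, &eqdesc, fd);
 *
 * A non-zero return from qbman_swp_enqueue() is assumed to mean the command
 * could not be issued (e.g. no room in the enqueue ring) and may be retried.
 */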

	/*******************/
	/* Buffer releases */
	/*******************/

struct qbman_release_desc {
	uint32_t dont_manipulate_directly[1];
};

/* Clear the contents of a descriptor to default/starting state. */
void qbman_release_desc_clear(struct qbman_release_desc *);
/* Set the ID of the buffer pool to release to */
void qbman_release_desc_set_bpid(struct qbman_release_desc *, uint32_t bpid);
/* Issue a release command. 'num_buffers' must be less than 8. */
int qbman_swp_release(struct qbman_swp *, const struct qbman_release_desc *,
		      const uint64_t *buffers, unsigned int num_buffers);
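
/* Example (an illustrative sketch only): return two buffers to a buffer pool.
 * 'swp' and 'bpid' are assumed valid; 'buf1' and 'buf2' are assumed to be the
 * addresses of the buffers being freed, in the form the pool expects.
 *
 *	struct qbman_release_desc releasedesc;
 *	uint64_t buffers[2] = { buf1, buf2 };
 *
 *	qbman_release_desc_clear(&releasedesc);
 *	qbman_release_desc_set_bpid(&releasedesc, bpid);
 *	err = qbman_swp_release(swp, &releasedesc, buffers, 2);
 */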

	/*******************/
	/* Buffer acquires */
	/*******************/

/* Issue an acquire command: attempt to withdraw 'num_buffers' buffers from the
 * pool identified by 'bpid', writing their addresses to the 'buffers' array. */
int qbman_swp_acquire(struct qbman_swp *, uint32_t bpid, uint64_t *buffers,
		      unsigned int num_buffers);
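
/* Example (an illustrative sketch only): try to pull a small batch of buffers
 * out of a buffer pool. 'swp' and 'bpid' are assumed valid, and the return
 * value is assumed to be the number of buffers actually acquired (or a
 * negative error code); the batch size of 7 mirrors the release-side limit and
 * is only illustrative.
 *
 *	uint64_t buffers[7];
 *	int n;
 *
 *	n = qbman_swp_acquire(swp, bpid, buffers, 7);
 *	if (n > 0)
 *		... buffers[0] through buffers[n - 1] now hold buffer
 *		    addresses drawn from pool 'bpid' ...
 */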

#endif /* !_FSL_QBMAN_PORTAL_H */