1*d14abf15SRobert Mustacchi
2*d14abf15SRobert Mustacchi #include "lm5710.h"
3*d14abf15SRobert Mustacchi #include "bd_chain.h"
4*d14abf15SRobert Mustacchi #include "command.h"
5*d14abf15SRobert Mustacchi #include "context.h"
6*d14abf15SRobert Mustacchi #include "lm_l4fp.h"
7*d14abf15SRobert Mustacchi #include "lm_l4sp.h"
8*d14abf15SRobert Mustacchi #include "mm_l4if.h"
9*d14abf15SRobert Mustacchi #include "mm.h"
10*d14abf15SRobert Mustacchi
11*d14abf15SRobert Mustacchi /* The maximum counter value for consumed count, if it exceeds this value we post it to firmware.
12*d14abf15SRobert Mustacchi * FW holds 32bits for this counter. Therefore 100MB is OK (see L4 VBD spec) */
13*d14abf15SRobert Mustacchi #define MAX_GRQ_COUNTER 0x6400000
14*d14abf15SRobert Mustacchi #define IS_OOO_CQE(__cmd) ((__cmd == CMP_OPCODE_TOE_GNI) \
15*d14abf15SRobert Mustacchi || (__cmd == CMP_OPCODE_TOE_GAIR) \
16*d14abf15SRobert Mustacchi || (__cmd == CMP_OPCODE_TOE_GAIL) \
17*d14abf15SRobert Mustacchi || (__cmd == CMP_OPCODE_TOE_GRI) \
18*d14abf15SRobert Mustacchi || (__cmd == CMP_OPCODE_TOE_GJ) \
19*d14abf15SRobert Mustacchi || (__cmd == CMP_OPCODE_TOE_DGI))
20*d14abf15SRobert Mustacchi
21*d14abf15SRobert Mustacchi typedef struct toe_rx_bd toe_rx_bd_t;
22*d14abf15SRobert Mustacchi
23*d14abf15SRobert Mustacchi static u16_t lm_squeeze_rx_buffer_list(
24*d14abf15SRobert Mustacchi struct _lm_device_t * pdev,
25*d14abf15SRobert Mustacchi lm_tcp_state_t * tcp,
26*d14abf15SRobert Mustacchi u16_t adjust_number,
27*d14abf15SRobert Mustacchi lm_tcp_gen_buf_t ** unwanted_gen_buf
28*d14abf15SRobert Mustacchi );
29*d14abf15SRobert Mustacchi
30*d14abf15SRobert Mustacchi static lm_status_t _lm_tcp_rx_post_buf(
31*d14abf15SRobert Mustacchi struct _lm_device_t *pdev,
32*d14abf15SRobert Mustacchi lm_tcp_state_t *tcp,
33*d14abf15SRobert Mustacchi lm_tcp_buffer_t *tcp_buf,
34*d14abf15SRobert Mustacchi lm_frag_list_t *frag_list
35*d14abf15SRobert Mustacchi );
36*d14abf15SRobert Mustacchi
37*d14abf15SRobert Mustacchi static void lm_tcp_incr_consumed_gen(
38*d14abf15SRobert Mustacchi struct _lm_device_t * pdev,
39*d14abf15SRobert Mustacchi lm_tcp_state_t * tcp,
40*d14abf15SRobert Mustacchi u32_t nbytes
41*d14abf15SRobert Mustacchi );
42*d14abf15SRobert Mustacchi
43*d14abf15SRobert Mustacchi static void lm_tcp_return_gen_bufs(
44*d14abf15SRobert Mustacchi struct _lm_device_t * pdev,
45*d14abf15SRobert Mustacchi lm_tcp_state_t * tcp,
46*d14abf15SRobert Mustacchi lm_tcp_gen_buf_t * gen_buf,
47*d14abf15SRobert Mustacchi u32_t flags,
48*d14abf15SRobert Mustacchi u8_t grq_idx
49*d14abf15SRobert Mustacchi );
50*d14abf15SRobert Mustacchi
51*d14abf15SRobert Mustacchi static void lm_tcp_return_list_of_gen_bufs(
52*d14abf15SRobert Mustacchi struct _lm_device_t * pdev,
53*d14abf15SRobert Mustacchi lm_tcp_state_t * tcp,
54*d14abf15SRobert Mustacchi d_list_t * gen_buf_list,
55*d14abf15SRobert Mustacchi u32_t flags,
56*d14abf15SRobert Mustacchi u8_t grq_idx
57*d14abf15SRobert Mustacchi );
58*d14abf15SRobert Mustacchi
_lm_tcp_isle_get_free_list(struct _lm_device_t * pdev,u8_t grq_idx)59*d14abf15SRobert Mustacchi static lm_isle_t * _lm_tcp_isle_get_free_list(
60*d14abf15SRobert Mustacchi struct _lm_device_t * pdev,
61*d14abf15SRobert Mustacchi u8_t grq_idx)
62*d14abf15SRobert Mustacchi {
63*d14abf15SRobert Mustacchi lm_isle_t * free_list = NULL;
64*d14abf15SRobert Mustacchi lm_isle_t * isles_pool = pdev->toe_info.grqs[grq_idx].isles_pool;
65*d14abf15SRobert Mustacchi u32_t isle_pool_idx;
66*d14abf15SRobert Mustacchi u32_t isle_pool_size = pdev->params.l4_isles_pool_size;
67*d14abf15SRobert Mustacchi DbgBreakIf(!isles_pool);
68*d14abf15SRobert Mustacchi for (isle_pool_idx = 0; isle_pool_idx < isle_pool_size; isle_pool_idx++) {
69*d14abf15SRobert Mustacchi if ((isles_pool[isle_pool_idx].isle_link.next == NULL) && (isles_pool[isle_pool_idx].isle_link.prev == NULL)) {
70*d14abf15SRobert Mustacchi free_list = isles_pool + isle_pool_idx;
71*d14abf15SRobert Mustacchi break;
72*d14abf15SRobert Mustacchi }
73*d14abf15SRobert Mustacchi }
74*d14abf15SRobert Mustacchi DbgBreakIf(!free_list);
75*d14abf15SRobert Mustacchi return free_list;
76*d14abf15SRobert Mustacchi }
77*d14abf15SRobert Mustacchi
_lm_tcp_isle_find(struct _lm_device_t * pdev,lm_tcp_state_t * tcp,u8_t num_isle)78*d14abf15SRobert Mustacchi static lm_isle_t * _lm_tcp_isle_find(
79*d14abf15SRobert Mustacchi struct _lm_device_t * pdev,
80*d14abf15SRobert Mustacchi lm_tcp_state_t * tcp,
81*d14abf15SRobert Mustacchi u8_t num_isle)
82*d14abf15SRobert Mustacchi {
83*d14abf15SRobert Mustacchi lm_isle_t * isle = NULL;
84*d14abf15SRobert Mustacchi lm_tcp_con_rx_gen_info_t * gen_info;
85*d14abf15SRobert Mustacchi u8_t isle_cnt, isle_idx;
86*d14abf15SRobert Mustacchi
87*d14abf15SRobert Mustacchi DbgBreakIf(!(tcp && tcp->rx_con));
88*d14abf15SRobert Mustacchi gen_info = &tcp->rx_con->u.rx.gen_info;
89*d14abf15SRobert Mustacchi isle_cnt = (u8_t)d_list_entry_cnt(&gen_info->isles_list);
90*d14abf15SRobert Mustacchi DbgBreakIf(!isle_cnt);
91*d14abf15SRobert Mustacchi DbgBreakIf(num_isle > isle_cnt);
92*d14abf15SRobert Mustacchi if (num_isle == gen_info->current_isle_number) {
93*d14abf15SRobert Mustacchi isle = gen_info->current_isle;
94*d14abf15SRobert Mustacchi } else {
95*d14abf15SRobert Mustacchi isle = (lm_isle_t*)gen_info->isles_list.head;
96*d14abf15SRobert Mustacchi for (isle_idx = 1; isle_idx < num_isle; isle_idx++) {
97*d14abf15SRobert Mustacchi isle = (lm_isle_t*)d_list_next_entry(&isle->isle_link);
98*d14abf15SRobert Mustacchi }
99*d14abf15SRobert Mustacchi gen_info->current_isle_number = num_isle;
100*d14abf15SRobert Mustacchi gen_info->current_isle = isle;
101*d14abf15SRobert Mustacchi }
102*d14abf15SRobert Mustacchi return isle;
103*d14abf15SRobert Mustacchi }
104*d14abf15SRobert Mustacchi
/* Detach isle number 'num_isle' (1-based) from the connection's isle list.
 * The isle's generic buffers are appended to the caller-supplied
 * 'gen_buf_list' and the number of bytes that were held in the isle is
 * returned.  The cached cursor (gen_info->current_isle /
 * current_isle_number) is updated to a neighboring isle, or cleared if
 * this was the last one.
 *
 * NOTE(review): 'grq_idx' is not used here — presumably kept for symmetry
 * with the other isle helpers; confirm before removing.
 */
static u32_t _lm_tcp_isle_remove(
    struct _lm_device_t * pdev,
    lm_tcp_state_t * tcp,
    u8_t grq_idx,
    u8_t num_isle,
    d_list_t * gen_buf_list)
{
    u32_t nbytes = 0;
    lm_isle_t * new_current_isle = NULL;
    lm_isle_t * isle = NULL;
    lm_tcp_con_rx_gen_info_t * gen_info;
    u8_t isles_cnt;
    u8_t new_current_isle_num;

    DbgBreakIf(!(tcp && tcp->rx_con));
    gen_info = &tcp->rx_con->u.rx.gen_info;
    isles_cnt = (u8_t)d_list_entry_cnt(&gen_info->isles_list);
    DbgBreakIf(!(num_isle && (num_isle <= isles_cnt)));
    isle = _lm_tcp_isle_find(pdev,tcp,num_isle);

    /* Pick the new "current" isle: prefer the successor (which keeps the
     * same ordinal number after removal), fall back to the predecessor,
     * else there is no isle left. */
    // DbgBreakIf((isles_cnt > 1) && (num_isle == 1));
    if (isle->isle_link.next != NULL) {
        new_current_isle = (lm_isle_t*)isle->isle_link.next;
        new_current_isle_num = num_isle;
    } else if (isle->isle_link.prev != NULL) {
        new_current_isle = (lm_isle_t*)isle->isle_link.prev;
        new_current_isle_num = num_isle - 1;
    } else {
        new_current_isle = NULL;
        new_current_isle_num = 0;
    }

#if defined(_NTDDK_)
#pragma prefast (push)
#pragma prefast (disable:6011)
#endif //_NTDDK_
    d_list_remove_entry(&gen_info->isles_list, &isle->isle_link);
#if defined(_NTDDK_)
#pragma prefast (pop)
#endif //_NTDDK_

    /* Hand the isle's buffers to the caller and empty the isle. */
    nbytes = isle->isle_nbytes;
    d_list_add_tail(gen_buf_list, &isle->isle_gen_bufs_list_head);
    d_list_init(&isle->isle_gen_bufs_list_head, NULL, NULL, 0);
    if (new_current_isle_num) {
        if (num_isle == 1) {
            /* The first isle is embedded in gen_info (first_isle) and
             * cannot be returned to the pool.  Instead, move the second
             * isle's contents into the embedded one and recycle the
             * second isle's pool entry below. */
#if defined(_NTDDK_)
#pragma prefast (push)
#pragma prefast (disable:28182)
#endif //_NTDDK_
            d_list_remove_entry(&gen_info->isles_list, &new_current_isle->isle_link);
#if defined(_NTDDK_)
#pragma prefast (pop)
#endif //_NTDDK_
            d_list_add_tail(&isle->isle_gen_bufs_list_head, &new_current_isle->isle_gen_bufs_list_head);
            d_list_push_head(&gen_info->isles_list, &isle->isle_link);
            isle->isle_nbytes = new_current_isle->isle_nbytes;
#ifdef DEBUG_OOO_CQE
            isle->dedicated_cid = new_current_isle->dedicated_cid;
            isle->recent_ooo_combined_cqe = new_current_isle->recent_ooo_combined_cqe;
#endif
            /* 'isle' now points at the pool entry to be freed; the embedded
             * first isle becomes the current one. */
            isle = new_current_isle;
            new_current_isle = &gen_info->first_isle;
        }
        /* Zero everything past the link field, then detach the link —
         * next == prev == NULL is what marks a pool entry free (see
         * _lm_tcp_isle_get_free_list).  Assumes isle_link is the first
         * member of lm_isle_t — TODO confirm against the struct layout. */
        mm_mem_zero(&isle->isle_gen_bufs_list_head, sizeof(lm_isle_t) - sizeof(d_list_entry_t));
        isle->isle_link.next = isle->isle_link.prev = NULL;
    }
    gen_info->current_isle = new_current_isle;
    gen_info->current_isle_number = new_current_isle_num;
    return nbytes;
}
176*d14abf15SRobert Mustacchi
177*d14abf15SRobert Mustacchi u32_t lm_tcp_rx_peninsula_to_rq(lm_device_t * pdev, lm_tcp_state_t * tcp, u32_t max_num_bytes_to_copy, u8_t sb_idx);
178*d14abf15SRobert Mustacchi
179*d14abf15SRobert Mustacchi /* TODO: remove this temporary solution for solaris / linux compilation conflict, linux needs the
180*d14abf15SRobert Mustacchi * first option, solaris the latter */
181*d14abf15SRobert Mustacchi #if defined(__LINUX)
182*d14abf15SRobert Mustacchi #define TOE_RX_INIT_ZERO {{0}}
183*d14abf15SRobert Mustacchi #else
184*d14abf15SRobert Mustacchi #define TOE_RX_INIT_ZERO {0}
185*d14abf15SRobert Mustacchi #endif
186*d14abf15SRobert Mustacchi
187*d14abf15SRobert Mustacchi #define TOE_RX_DOORBELL(pdev,cid) do{\
188*d14abf15SRobert Mustacchi struct doorbell db = TOE_RX_INIT_ZERO;\
189*d14abf15SRobert Mustacchi db.header.data |= ((TOE_CONNECTION_TYPE << DOORBELL_HDR_T_CONN_TYPE_SHIFT) |\
190*d14abf15SRobert Mustacchi (DOORBELL_HDR_T_RX << DOORBELL_HDR_T_RX_SHIFT));\
191*d14abf15SRobert Mustacchi DOORBELL((pdev), (cid), *((u32_t *)&db));\
192*d14abf15SRobert Mustacchi } while(0)
193*d14abf15SRobert Mustacchi
/* Publish the pending RX producer updates (db_more_bds / db_more_bytes)
 * to the firmware doorbell data and ring the RX doorbell, unless the
 * doorbell is blocked (TCP_RX_DB_BLOCKED).  In either case the driver's
 * own post counters are advanced and the pending deltas are reset, so
 * the accumulated work is accounted exactly once.
 */
static __inline void lm_tcp_rx_write_db(
    lm_device_t *pdev,
    lm_tcp_state_t *tcp
    )
{
    lm_tcp_con_t *rx_con = tcp->rx_con;
    volatile struct toe_rx_db_data *db_data = rx_con->db_data.rx;

    if (!(rx_con->flags & TCP_RX_DB_BLOCKED)) {
        db_data->bds_prod += rx_con->db_more_bds; /* nbds should be written before nbytes (FW assumption) */
        db_data->bytes_prod += rx_con->db_more_bytes;

        DbgMessage(pdev, INFORMl4rx,
                    "_lm_tcp_rx_write_db: cid=%d, (nbytes+=%d, nbds+=%d)\n",
                    tcp->cid, rx_con->db_more_bytes, rx_con->db_more_bds);
        TOE_RX_DOORBELL(pdev, tcp->cid);
    }

    /* assert if the new addition will make the cyclic counter post_cnt smaller than comp_cnt */
    DbgBreakIf(S64_SUB(rx_con->bytes_post_cnt + rx_con->db_more_bytes, rx_con->bytes_comp_cnt) < 0);
    rx_con->bytes_post_cnt += rx_con->db_more_bytes;
    rx_con->buffer_post_cnt += rx_con->db_more_bufs;
    /* deltas consumed — reset so the next doorbell only carries new work */
    rx_con->db_more_bytes = rx_con->db_more_bds = rx_con->db_more_bufs = 0;
    rx_con->fp_db_cnt++;
}
219*d14abf15SRobert Mustacchi
/** Description
 *  This function is used to increase the window-size. Window is increased in 3 cases:
 *  1. RQ-placed bytes
 *  2. GRQ-Indicated successfully (short/long loop, doesn't matter)
 *  3. Window-update from NDIS (initial rcv window increased)
 *  Note: this function also takes into account dwa: delayed window algorithm, and updates the
 *  data structures accordingly; however, not all window-updates are part of the dwa algorithm
 *  (specifically, (3) is not), therefore we need to know if the update is dwa-aware or not.
 */
void lm_tcp_rx_post_sws (
    lm_device_t * pdev,
    lm_tcp_state_t * tcp,
    lm_tcp_con_t * rx_con,
    u32_t nbytes,
    u8_t op
    )
{
    volatile struct toe_rx_db_data *db_data = rx_con->db_data.rx;
    s32_t diff_to_fw;

    switch (op)
    {
    case TCP_RX_POST_SWS_INC:
        /* Grow the driver's right edge by nbytes, first paying off any
         * 'extra_bytes' debt accumulated by earlier decreases. */
        /*DbgMessage(pdev, FATAL, "lm_tcp_rx_post_sws() INC: OLD drv_rcv_win_right_edge=%d, nbytes=%d, NEW drv_rcv_win_right_edge=%d FW right_edge=%d \n", rx_con->u.rx.sws_info.drv_rcv_win_right_edge, nbytes, rx_con->u.rx.sws_info.drv_rcv_win_right_edge + nbytes, db_data->rcv_win_right_edge);*/
        if (rx_con->u.rx.sws_info.extra_bytes > nbytes) {
            rx_con->u.rx.sws_info.extra_bytes -= nbytes;
            nbytes = 0;
        } else {
            nbytes -= rx_con->u.rx.sws_info.extra_bytes;
            rx_con->u.rx.sws_info.extra_bytes = 0;
            rx_con->u.rx.sws_info.drv_rcv_win_right_edge += nbytes;
            /* driver edge caught up with FW edge — FW may honor window updates again */
            if (rx_con->u.rx.sws_info.drv_rcv_win_right_edge >= db_data->rcv_win_right_edge) {
                RESET_FLAGS(tcp->rx_con->db_data.rx->flags, TOE_RX_DB_DATA_IGNORE_WND_UPDATES);
            }
        }
        break;
    case TCP_RX_POST_SWS_DEC:
        if (rx_con->u.rx.sws_info.extra_bytes) {
            rx_con->u.rx.sws_info.extra_bytes += nbytes;
            nbytes = 0;
        }
        /*DbgMessage(pdev, FATAL, "lm_tcp_rx_post_sws() DEC: OLD drv_rcv_win_right_edge=%d, nbytes=%d, NEW drv_rcv_win_right_edge=%d\n", rx_con->u.rx.sws_info.drv_rcv_win_right_edge, nbytes, rx_con->u.rx.sws_info.drv_rcv_win_right_edge - nbytes);*/
        rx_con->u.rx.sws_info.drv_rcv_win_right_edge -= nbytes;
        SET_FLAGS(db_data->flags, TOE_RX_DB_DATA_IGNORE_WND_UPDATES);
        break;
    case TCP_RX_POST_SWS_SET:
        /* Absolute set of the FW right edge; clears any accumulated debt. */
        /*DbgMessage(pdev, FATAL, "lm_tcp_rx_post_sws() SET: nbytes=%d\n", nbytes);*/
        db_data->rcv_win_right_edge = nbytes;
        rx_con->u.rx.sws_info.extra_bytes = 0; /* was '0;;' — stray empty statement removed */
        break;
    default:
        DbgBreakMsg("lm_tcp_rx_post_sws: Invalid operation\n");
        return;
    }

    /* note that diff_to_fw could be negative due to possibility of window-decrease in LH */
    diff_to_fw = S32_SUB(rx_con->u.rx.sws_info.drv_rcv_win_right_edge, db_data->rcv_win_right_edge);

    /* If this update isn't dwa_aware, it's good to go... */

    //DbgMessage(pdev, WARNl4, "###lm_tcp_rx_post_sws cid=%d num_bytes=%d diff_to_fw=%d \n", tcp->cid, nbytes, diff_to_fw );
    /* we give the window only if diff_to_fw is larger than mss, which also means only in case it is negative... */
    if ( ((diff_to_fw >= (s32_t)rx_con->u.rx.sws_info.mss) ||
         (diff_to_fw >= (((s32_t)tcp->tcp_cached.initial_rcv_wnd) / 2)))) {
        if (rx_con->u.rx.sws_info.timer_on) {
            /* Vladz TBD: Cancel the timer */
            rx_con->u.rx.sws_info.timer_on = 0;
        }

        /* Ring the Advertise Window doorbell here */
        if (!(tcp->rx_con->flags & TCP_RX_DB_BLOCKED) && !(tcp->rx_con->flags & TCP_RX_POST_BLOCKED)) {
            db_data->rcv_win_right_edge = rx_con->u.rx.sws_info.drv_rcv_win_right_edge;
            DbgMessage(pdev, INFORMl4rx,
                        "_lm_tcp_adv_wnd_write_db: cid=%d, nbytes=%d\n",
                        tcp->cid, diff_to_fw);
            TOE_RX_DOORBELL(pdev, tcp->cid);
        }
    } else {
        /* Update too small to advertise now — defer it (SWS avoidance). */
        if ( ! rx_con->u.rx.sws_info.timer_on ) {
            /* Vladz TBD: schedule the timer here */
            rx_con->u.rx.sws_info.timer_on = 1;
        }
    }
}
304*d14abf15SRobert Mustacchi
_lm_tcp_rx_set_bd(IN lm_frag_t * frag,IN u16_t flags,IN lm_bd_chain_t * rx_chain,IN u32_t dbg_bytes_prod)305*d14abf15SRobert Mustacchi static __inline toe_rx_bd_t * _lm_tcp_rx_set_bd (
306*d14abf15SRobert Mustacchi IN lm_frag_t * frag,
307*d14abf15SRobert Mustacchi IN u16_t flags,
308*d14abf15SRobert Mustacchi IN lm_bd_chain_t * rx_chain,
309*d14abf15SRobert Mustacchi IN u32_t dbg_bytes_prod /* Used for synchronizing between fw and driver rq-available-bytes
310*d14abf15SRobert Mustacchi * This is used only as a debug variable for asserting in the fw. */
311*d14abf15SRobert Mustacchi )
312*d14abf15SRobert Mustacchi {
313*d14abf15SRobert Mustacchi struct toe_rx_bd * rx_bd;
314*d14abf15SRobert Mustacchi
315*d14abf15SRobert Mustacchi /* hw limit: each bd can point to a buffer with max size of 64KB */
316*d14abf15SRobert Mustacchi DbgBreakIf(frag->size > TCP_MAX_SGE_SIZE || frag->size == 0);
317*d14abf15SRobert Mustacchi rx_bd = (struct toe_rx_bd *)lm_toe_bd_chain_produce_bd(rx_chain);
318*d14abf15SRobert Mustacchi rx_bd->addr_hi = frag->addr.as_u32.high;
319*d14abf15SRobert Mustacchi rx_bd->addr_lo = frag->addr.as_u32.low;
320*d14abf15SRobert Mustacchi rx_bd->flags = flags;
321*d14abf15SRobert Mustacchi rx_bd->size = (u16_t)frag->size;
322*d14abf15SRobert Mustacchi rx_bd->dbg_bytes_prod = dbg_bytes_prod;
323*d14abf15SRobert Mustacchi DbgMessage(NULL, VERBOSEl4rx, "Setting Rx BD flags=0x%x, bd_addr=0x%p, size=%d\n", rx_bd->flags, rx_bd, frag->size);
324*d14abf15SRobert Mustacchi return rx_bd;
325*d14abf15SRobert Mustacchi }
326*d14abf15SRobert Mustacchi
327*d14abf15SRobert Mustacchi
328*d14abf15SRobert Mustacchi /** Description
329*d14abf15SRobert Mustacchi * function completes nbytes on a single tcp buffer and completes the buffer if it is
330*d14abf15SRobert Mustacchi * completed.
331*d14abf15SRobert Mustacchi * Assumptions:
332*d14abf15SRobert Mustacchi * fp-lock is taken.
333*d14abf15SRobert Mustacchi * It is only called from lm_tcp_rx_post_buf!!!
334*d14abf15SRobert Mustacchi */
static void lm_tcp_complete_tcp_buf(
    lm_device_t * pdev, lm_tcp_state_t * tcp, lm_tcp_con_t * con, lm_tcp_buffer_t * tcp_buf, u32_t completed_bytes)
{
    s_list_t completed_bufs;
    s_list_entry_t * entry;

    /* Account the completed bytes against this buffer and the running
     * application-buffer accumulator. */
    DbgBreakIf(completed_bytes > tcp_buf->more_to_comp);
    tcp_buf->more_to_comp -= completed_bytes;
    con->app_buf_bytes_acc_comp += completed_bytes;

    /* If this is the last buffer of the application buffer (POST_END) and
     * it is now fully consumed, latch the total transferred count and reset
     * the accumulator for the next app buffer. */
    if(tcp_buf->more_to_comp == 0 && GET_FLAGS(tcp_buf->flags, TCP_BUF_FLAG_L4_POST_END)) {
        tcp_buf->app_buf_xferred = con->app_buf_bytes_acc_comp;
        DbgBreakIf(tcp_buf->app_buf_xferred > tcp_buf->app_buf_size); /* this may be partial completion */
        con->app_buf_bytes_acc_comp = 0;
        if (GET_FLAGS(con->flags, TCP_POST_COMPLETE_SPLIT)) {
            RESET_FLAGS(con->flags, TCP_POST_COMPLETE_SPLIT);
        }
    } else {
        tcp_buf->app_buf_xferred = 0;
    }

    if (tcp_buf->more_to_comp == 0) {
        /* should have nothing in the active tb list except this buffer, if we're completing this buffer,
         * it means that we had something in the peninsula, this means that at the end of the DPC there was
         * nothing in the active-tb-list, and between DPCs all posted buffers 'occupied' bytes from the peninsula
         * and were completed to the client. This means that there can be no RQ completions during the DPC that
         * will try to access the active tb list w/o a lock
         */
        DbgBreakIf(s_list_entry_cnt(&con->active_tb_list) != 1);
        lm_bd_chain_bds_consumed(&con->bd_chain, tcp_buf->bd_used);

        con->buffer_completed_cnt ++;
        DbgMessage(pdev, VERBOSEl4fp,
                    "cid=%d, completing tcp buf towards mm from post-flow, actual_completed_bytes=%d\n",
                    tcp->cid, tcp_buf->size);
        /* Pop the (single) buffer, adjust outstanding RQ byte count and hand
         * the completion to the mm layer as a one-entry list. */
        entry = s_list_pop_head(&con->active_tb_list);
        DbgBreakIf(con->rq_nbytes < tcp_buf->size);
        con->rq_nbytes -= tcp_buf->size;
        s_list_init(&completed_bufs, entry, entry, 1);
        con->rq_completion_calls++;
        mm_tcp_complete_bufs(pdev, tcp, con, &completed_bufs, LM_STATUS_SUCCESS);
    }
}
378*d14abf15SRobert Mustacchi
379*d14abf15SRobert Mustacchi
380*d14abf15SRobert Mustacchi
/* Process an RX completion from firmware: complete 'completed_bytes'
 * (and/or a push indication) against the connection's posted RX buffers.
 * Takes the connection fp-lock unless running in deferred-processing
 * context, where the caller already holds it.
 */
void lm_tcp_rx_cmp_process(
    struct _lm_device_t * pdev,
    lm_tcp_state_t * tcp,
    u32_t completed_bytes,
    u8_t push
    )
{
    lm_tcp_con_t *rx_con;
    u32_t actual_bytes_completed;
    MM_INIT_TCP_LOCK_HANDLE();

    DbgMessage(pdev, VERBOSEl4rx, "##lm_tcp_rx_cmp_process, completed_bytes=%d, push=%d cid=%d\n", completed_bytes, push, tcp->cid);
    DbgBreakIf(!(completed_bytes || push)); /* otherwise there is no point for this function to be called */

    rx_con = tcp->rx_con;
    DbgBreakIf(! rx_con);

    if (!(rx_con->flags & TCP_DEFERRED_PROCESSING)) {
        mm_acquire_tcp_lock(pdev, rx_con);
    }
    DbgBreakIf(rx_con->flags & TCP_RX_COMP_BLOCKED);

    /* RQ completions can't arrive while we have something in the peninsula (peninsula must either be completed or copied
     * to the app-buffer before) An RQ_SKP within the dpc will always take care of previous RQs waiting to be copied to. */
    DbgBreakIf(!d_list_is_empty(&rx_con->u.rx.gen_info.peninsula_list));
    DbgBreakIf(!d_list_is_empty(&rx_con->u.rx.gen_info.dpc_peninsula_list));

    actual_bytes_completed = lm_tcp_complete_nbytes(pdev, tcp, rx_con, completed_bytes , push);

    /* advance the cyclic completion counter; must never overtake the post counter */
    rx_con->bytes_comp_cnt += actual_bytes_completed;
    DbgBreakIf(S64_SUB(rx_con->bytes_post_cnt, rx_con->bytes_comp_cnt) < 0);
    DbgMessage(pdev, VERBOSEl4rx, "lm_tcp_rx_comp, after comp: pending=%d, active_bufs=%d\n",
                S64_SUB(rx_con->bytes_post_cnt, rx_con->bytes_comp_cnt),
                s_list_entry_cnt(&rx_con->active_tb_list));

    if ( completed_bytes ) {
        /* Vladz: TBD
        lm_neigh_update_nic_reachability_time(tcp->path->neigh) */
    }
    if (!(rx_con->flags & TCP_DEFERRED_PROCESSING)) {
        mm_release_tcp_lock(pdev, rx_con);
    }
} /* lm_tcp_rx_comp */
424*d14abf15SRobert Mustacchi
425*d14abf15SRobert Mustacchi
/* Process a firmware "skip" (RQ_SKP) indication of 'bytes_skipped' bytes:
 * first complete against bytes already copied to app buffers
 * (skp_bytes_copied), then satisfy the remainder by copying data from the
 * dpc-peninsula into the posted RQ buffers.  Takes the fp-lock unless
 * in deferred-processing context.
 */
void lm_tcp_rx_skp_process(
    struct _lm_device_t * pdev,
    lm_tcp_state_t * tcp,
    u32_t bytes_skipped,
    u8_t sb_idx
    )
{
    lm_tcp_con_t *rx_con;
    u32_t comp_bytes;
    MM_INIT_TCP_LOCK_HANDLE();

    DbgMessage(pdev, VERBOSEl4rx, "##lm_tcp_rx_skp_process, bytes_skipped=%d, cid=%d\n", bytes_skipped, tcp->cid);

    if (bytes_skipped == 0) {
        /* nothing to do here - occurs on special fw case, where there is GRQ->RQ processing with no GRQ and no RQ,
         * this will usually happen at the beginning or in special cases of the connection */
        return;
    }

    rx_con = tcp->rx_con;
    DbgBreakIf(! rx_con);

    if (!GET_FLAGS(rx_con->flags, TCP_DEFERRED_PROCESSING)) {
        mm_acquire_tcp_lock(pdev, rx_con);
    }
    DbgBreakIf(GET_FLAGS(rx_con->flags, TCP_RX_COMP_BLOCKED));

    /* Portion of the skip that was already copied to app buffers earlier:
     * just account the completion, no data movement needed. */
    comp_bytes = min(bytes_skipped, tcp->rx_con->u.rx.skp_bytes_copied);
    if (comp_bytes) {
        tcp->rx_con->bytes_comp_cnt += comp_bytes;
        /* complete nbytes on buffers (dpc-flow ) */
        lm_tcp_complete_nbytes(pdev, tcp, tcp->rx_con, comp_bytes, /* push=*/ 0);
        bytes_skipped -= comp_bytes;
        tcp->rx_con->u.rx.skp_bytes_copied -= comp_bytes;
    }

    /* We know for sure, that all the application buffers we are about to access have already been posted
     * before the dpc, and therefore are valid in the active_tb_list.
     * TBA Michals: bypass FW
     */
    if (bytes_skipped) {
        DbgBreakIf(!d_list_is_empty(&rx_con->u.rx.gen_info.peninsula_list));
        DbgBreakIfAll(d_list_is_empty(&rx_con->u.rx.gen_info.dpc_peninsula_list));
        DbgBreakIf(((lm_tcp_gen_buf_t *)d_list_peek_head(&rx_con->u.rx.gen_info.dpc_peninsula_list))->placed_bytes == 0);
        rx_con->u.rx.gen_info.bytes_copied_cnt_in_process += lm_tcp_rx_peninsula_to_rq(pdev, tcp, bytes_skipped,sb_idx);
    }

    if (!GET_FLAGS(rx_con->flags, TCP_DEFERRED_PROCESSING)) {
        mm_release_tcp_lock(pdev, rx_con);
    }
} /* lm_tcp_rx_skp */
477*d14abf15SRobert Mustacchi
/* Delete 'num_of_isles' consecutive isles starting at isle 'num_isle',
 * removing them highest-index first so the remaining ordinals stay valid.
 * The generic buffers held by the deleted isles are returned to the GRQ
 * pool (sb_idx) and the per-GRQ statistics deltas are adjusted.  Clears
 * the TOO_BIG_ISLE / TOO_MANY_ISLES dpc flags that triggered the delete.
 */
void lm_tcp_rx_delete_isle(
    struct _lm_device_t * pdev,
    lm_tcp_state_t * tcp,
    u8_t sb_idx,
    u8_t num_isle,
    u32_t num_of_isles)
{
    lm_tcp_con_t * rx_con = tcp->rx_con;
    lm_tcp_con_rx_gen_info_t * gen_info;
    d_list_t removed_list;
    u32_t isle_nbytes;



    DbgMessage(pdev, VERBOSEl4rx, "##lm_tcp_rx_delete_isle cid=%d isle=%d\n", tcp->cid, num_isle);
    gen_info = &rx_con->u.rx.gen_info;
    d_list_init(&removed_list, NULL, NULL, 0);

    /* remove from the last requested isle backwards to 'num_isle' */
    while (num_of_isles) {
        isle_nbytes = _lm_tcp_isle_remove(pdev, tcp, sb_idx, num_isle + (num_of_isles - 1), &removed_list);
        pdev->toe_info.grqs[sb_idx].number_of_isles_delta--;
        DbgBreakIf(isle_nbytes > gen_info->isle_nbytes);
        gen_info->isle_nbytes -= isle_nbytes;
        num_of_isles--;
    }

    pdev->toe_info.grqs[sb_idx].gen_bufs_in_isles_delta -= (s32_t)d_list_entry_cnt(&removed_list);
    if (!d_list_is_empty(&removed_list)) {
        lm_tcp_return_list_of_gen_bufs(pdev,tcp ,&removed_list, MM_TCP_RGB_COLLECT_GEN_BUFS, sb_idx);
        tcp->rx_con->droped_non_empty_isles++;
    } else {
        /* deleting only empty isles is unexpected — flag it on checked builds */
        DbgBreak();
        tcp->rx_con->droped_empty_isles++;
    }
    rx_con->dpc_info.dpc_flags &= ~(LM_TCP_DPC_TOO_BIG_ISLE | LM_TCP_DPC_TOO_MANY_ISLES);
    return;
}
515*d14abf15SRobert Mustacchi
/** Description
 *  Checks whether the given TOE RSS chain has at least one unprocessed
 *  RX completion, i.e. the hardware consumer index has advanced past the
 *  driver's consumer index on that RCQ.
 * Returns
 *  TRUE when there is RX completion work pending, FALSE otherwise.
 */
u8_t lm_toe_is_rx_completion(lm_device_t *pdev, u8_t drv_toe_rss_id)
{
    lm_tcp_rcq_t *rcq;
    u8_t          result;

    DbgBreakIf(!(pdev && ARRSIZE(pdev->toe_info.rcqs) > drv_toe_rss_id));

    rcq = &pdev->toe_info.rcqs[drv_toe_rss_id];

    /* Work is pending when the hw index pointer exists and differs from
     * the position the driver has consumed up to. */
    result = ((rcq->hw_con_idx_ptr != NULL) &&
              (*rcq->hw_con_idx_ptr != lm_bd_chain_cons_idx(&rcq->bd_chain))) ?
             TRUE : FALSE;

    DbgMessage(pdev, INFORMl4int, "lm_toe_is_rx_completion(): result is:%s\n", result? "TRUE" : "FALSE");

    return result;
}
534*d14abf15SRobert Mustacchi
535*d14abf15SRobert Mustacchi /** Description
536*d14abf15SRobert Mustacchi * checks if the processing of a certain RCQ is suspended
537*d14abf15SRobert Mustacchi */
u8_t lm_toe_is_rcq_suspended(lm_device_t *pdev, u8_t drv_toe_rss_id)
{
    u8_t suspended = FALSE;

    /* Out-of-range chain ids simply report "not suspended". */
    if (drv_toe_rss_id < MAX_L4_RX_CHAIN)
    {
        lm_tcp_rcq_t *rcq = &pdev->toe_info.rcqs[drv_toe_rss_id];

        suspended = rcq->suspend_processing ? TRUE : FALSE;
    }
    DbgMessage(pdev, INFORMl4int, "lm_toe_is_rcq_suspended(): sb_idx:%d, result is:%s\n", drv_toe_rss_id, suspended?"TRUE":"FALSE");
    return suspended;
}
553*d14abf15SRobert Mustacchi
554*d14abf15SRobert Mustacchi
555*d14abf15SRobert Mustacchi /** Description
556*d14abf15SRobert Mustacchi * Increment consumed generic counter for a connection.
557*d14abf15SRobert Mustacchi * To avoid rollover in the FW if the counter exceeds a maximum threshold, the driver should
558*d14abf15SRobert Mustacchi * not wait for application buffers and post 'receive window update' doorbell immediately.
559*d14abf15SRobert Mustacchi * The FW holds 32bits for this counter. Therefore a threshold of 100MB is OK.
560*d14abf15SRobert Mustacchi */
static void lm_tcp_incr_consumed_gen(
    struct _lm_device_t * pdev,
    lm_tcp_state_t      * tcp,
    u32_t                 nbytes
    )
{
    lm_tcp_con_t                   * rx_con  = tcp->rx_con;
    volatile struct toe_rx_db_data * db_data = rx_con->db_data.rx;

    /* The FW db_data counter is updated directly - no need for a separate
     * driver-side stage. */
    db_data->consumed_grq_bytes += nbytes;

    /* A doorbell normally rides on one of two events: (1) a posted buffer
     * that bypasses FW, (2) a successful indication that also grows the
     * window. The window is NOT grown when the increase is below one MSS,
     * so in that case the consumed-count update must ring the doorbell
     * itself (unless doorbells are currently blocked). */
    if ((nbytes < rx_con->u.rx.sws_info.mss) &&
        !(rx_con->flags & TCP_RX_DB_BLOCKED))
    {
        TOE_RX_DOORBELL(pdev, tcp->cid);
    }
}
583*d14abf15SRobert Mustacchi
584*d14abf15SRobert Mustacchi /** Description
585*d14abf15SRobert Mustacchi * Copies as many bytes as possible from the peninsula to the single tcp buffer received
586*d14abf15SRobert Mustacchi * updates the peninsula.
587*d14abf15SRobert Mustacchi * This function can be called from two flows:
588*d14abf15SRobert Mustacchi * 1. Post of a buffer
589*d14abf15SRobert Mustacchi * 2. Completion of a dpc.
590*d14abf15SRobert Mustacchi * We need to know which flow it is called from to know which peninsula list to use:
591*d14abf15SRobert Mustacchi * dpc_peninsula_list / peninsula_list.
592*d14abf15SRobert Mustacchi * Post ALWAYS uses the peninsula_list, since it doesn't know about the dpc_peninsula
593*d14abf15SRobert Mustacchi * Completion ALWAYS uses the dpc_peninsula_list, and in this case peninsula_list MUST be empty
594*d14abf15SRobert Mustacchi * this is because there can be buffers in the active_tb_list ONLY if peninsula_list is empty.
595*d14abf15SRobert Mustacchi *
596*d14abf15SRobert Mustacchi * first_buf_offset refers to the peninsula we're dealing with, at the end of the dpc the dpc_peninsula
597*d14abf15SRobert Mustacchi * is copied to the peninsula, therefore first_buf_offset will still be valid. copying from post means that
598*d14abf15SRobert Mustacchi * there is something in the peninsula which means theres nothing in the active_tb_list ==> won't be a copy from
599*d14abf15SRobert Mustacchi * dpc. Copying from dpc means theres something in the active-tb-list ==> nothing in the peninsula ==> won't be called
600*d14abf15SRobert Mustacchi * from post, mutual exclusion exists between the post/dpc of copying, therefore we can have only one first_buffer_offset
601*d14abf15SRobert Mustacchi * all other accesses (indication) are done under a lock.
602*d14abf15SRobert Mustacchi * param: dpc - indicates if this is called from the dpc or not (post)
603*d14abf15SRobert Mustacchi * Assumptions:
604*d14abf15SRobert Mustacchi * tcp_buf->more_to_comp is initialized
605*d14abf15SRobert Mustacchi * tcp_buf->size is initialized
606*d14abf15SRobert Mustacchi * num_bufs_complete is initialized by caller (could differ from zero)
607*d14abf15SRobert Mustacchi * Returns
608*d14abf15SRobert Mustacchi * the actual number of bytes copied
609*d14abf15SRobert Mustacchi * num_bufs_complete is the number of buffers that were completely copied to the pool and can be
610*d14abf15SRobert Mustacchi * returned to the pool.
611*d14abf15SRobert Mustacchi */
static u32_t lm_tcp_rx_peninsula_to_rq_copy(
    lm_device_t     * pdev,
    lm_tcp_state_t  * tcp,
    lm_tcp_buffer_t * tcp_buf,          /* destination RQ buffer */
    d_list_t        * return_list,      /* out: fully-consumed gen bufs */
    u32_t             max_num_bytes_to_copy,
    u8_t              dpc)              /* TRUE: dpc_peninsula, FALSE: peninsula */
{
    lm_tcp_gen_buf_t         * curr_gen_buf;
    lm_tcp_con_rx_gen_info_t * gen_info;
    d_list_t                 * peninsula;
    u32_t                      tcp_offset;
    u32_t                      ncopy;
    u32_t                      bytes_left;
    u32_t                      bytes_copied = 0;

    DbgMessage(pdev, VERBOSEl4rx, "###lm_tcp_rx_peninsula_to_rq_copy tcp_buf = 0x%x cid=%d\n", *((u32_t *)&tcp_buf), tcp->cid);

    gen_info = &tcp->rx_con->u.rx.gen_info;

    /* Pick the list matching the calling flow (see block comment above):
     * completion uses dpc_peninsula_list, post uses peninsula_list. */
    if (dpc) {
        peninsula = &gen_info->dpc_peninsula_list;
    } else {
        peninsula = &gen_info->peninsula_list;
    }

    curr_gen_buf = (lm_tcp_gen_buf_t *)d_list_peek_head(peninsula);
    /* Resume at the first unfilled byte of the tcp buffer. */
    tcp_offset = tcp_buf->size - tcp_buf->more_to_comp;
    bytes_left = min(tcp_buf->more_to_comp, max_num_bytes_to_copy); /* copy to buffer only what's aloud...*/

    /* start copying as much as possible from peninsula to tcp buffer.
     * A head buffer with placed_bytes==0 hasn't been filled by FW yet,
     * so it terminates the loop. */
    while (bytes_left && curr_gen_buf && curr_gen_buf->placed_bytes) {
        /* Bytes still unread in the current generic buffer, capped by
         * what the tcp buffer can still take. */
        ncopy = curr_gen_buf->placed_bytes - gen_info->first_buf_offset;
        if (ncopy > bytes_left) {
            ncopy = bytes_left;
        }
        if (mm_tcp_copy_to_tcp_buf(pdev, tcp, tcp_buf,
                                   curr_gen_buf->buf_virt + gen_info->first_buf_offset, /* start of data in generic buffer */
                                   tcp_offset, ncopy) != ncopy)
        {
            /* Virtual copy came up short - fall back to DMAE. */
            gen_info->copy_gen_buf_dmae_cnt++;

            /* If this is generic buffer that has the free_when_done flag on it means it's non-cached memory and not physical
             * memory -> so, we can't try and dmae to it... not likely to happen... */
            if (!GET_FLAGS(curr_gen_buf->flags, GEN_FLAG_FREE_WHEN_DONE)) {
                if (mm_tcp_rx_peninsula_to_rq_copy_dmae(pdev,
                                                        tcp,
                                                        curr_gen_buf->buf_phys,
                                                        gen_info->first_buf_offset, /* start of data in generic buffer */
                                                        tcp_buf,
                                                        tcp_offset,
                                                        ncopy) != ncopy)
                {
                    DbgBreakMsg("Unable To Copy");
                    gen_info->copy_gen_buf_fail_cnt++;

                    break;
                }
            } else {
                /* Non-cached memory: DMAE is not an option either. */
                DbgBreakMsg("Unable To Copy");
                gen_info->copy_gen_buf_fail_cnt++;

                break;
            }
        }

        /* update peninsula */
        bytes_copied += ncopy;

        gen_info->first_buf_offset += (u16_t)ncopy;

        /* done with the generic buffer? - return it to the pool */
        if (curr_gen_buf->placed_bytes == gen_info->first_buf_offset) {
            curr_gen_buf = (lm_tcp_gen_buf_t *)d_list_pop_head(peninsula);
            d_list_push_tail(return_list, &curr_gen_buf->link);
            gen_info->first_buf_offset = 0;
            gen_info->num_buffers_copied_grq++;
            curr_gen_buf = (lm_tcp_gen_buf_t *)d_list_peek_head(peninsula);
        }

        /* update tcp buf stuff */
        bytes_left -= ncopy;
        tcp_offset += ncopy;
    }

    /* Charge the copied bytes against the peninsula we actually used. */
    if (dpc) {
        gen_info->dpc_peninsula_nbytes -= bytes_copied;
    } else {
        gen_info->peninsula_nbytes -= bytes_copied;
    }

    /* return the number of bytes actually copied */
    return bytes_copied;
}
706*d14abf15SRobert Mustacchi
707*d14abf15SRobert Mustacchi /** Description
708*d14abf15SRobert Mustacchi * function copies data from the peninsula to tcp buffers already placed in the
709*d14abf15SRobert Mustacchi * active_tb_list. The function completes the buffers if a tcp buffer from active_tb_list
710*d14abf15SRobert Mustacchi * was partially/fully filled. This case simulates a call to lm_tcp_rx_comp
711*d14abf15SRobert Mustacchi * (i.e. a completion received from firmware)
712*d14abf15SRobert Mustacchi * Assumptions:
713*d14abf15SRobert Mustacchi */
u32_t lm_tcp_rx_peninsula_to_rq(lm_device_t * pdev, lm_tcp_state_t * tcp, u32_t max_num_bytes_to_copy, u8_t sb_idx)
{
    lm_tcp_buffer_t          * curr_tcp_buf;
    lm_tcp_con_rx_gen_info_t * gen_info;
    d_list_t                   return_list;
    u32_t                      copied_bytes = 0, currently_copied = 0;

    DbgMessage(pdev, VERBOSEl4rx, "###lm_tcp_rx_peninsula_to_rq cid=%d\n", tcp->cid);

    gen_info = &tcp->rx_con->u.rx.gen_info;

    DbgBreakIf(gen_info->peninsula_blocked == TRUE); /* terminate was already called - no copying should be done */

    /* Copy data from dpc_peninsula to tcp buffer[s] */
    d_list_init(&return_list, NULL, NULL, 0);

    curr_tcp_buf = lm_tcp_next_entry_dpc_active_list(tcp->rx_con);

    /* TBA Michals: FW Bypass First check if we can copy to bypass buffers */

    /* Copy the number of bytes received in SKP.
     * Walk the active_tb_list, filling each buffer in turn until the byte
     * budget, the dpc peninsula, or the buffer list runs out. */
    while (max_num_bytes_to_copy && gen_info->dpc_peninsula_nbytes && curr_tcp_buf) {
        currently_copied = lm_tcp_rx_peninsula_to_rq_copy(pdev, tcp, curr_tcp_buf, &return_list, max_num_bytes_to_copy, TRUE);
        curr_tcp_buf = (lm_tcp_buffer_t *)s_list_next_entry(&curr_tcp_buf->link);
        DbgBreakIf(max_num_bytes_to_copy < currently_copied);
        max_num_bytes_to_copy -= currently_copied;
        copied_bytes += currently_copied;
    }

    /* Generic buffers fully consumed by the copy go back to the pool. */
    if (!d_list_is_empty(&return_list)) {

        lm_tcp_return_list_of_gen_bufs(pdev,tcp , &return_list,
                                       (sb_idx != NON_EXISTENT_SB_IDX) ? MM_TCP_RGB_COLLECT_GEN_BUFS : 0, sb_idx);
    }

    /* If we've copied to a buffer in the active_tb_list we need to complete it since fw knows
     * the driver has the bytes and the driver will take care of copying them and completing them.
     * this path simulates a call to lm_tcp_rx_comp (buffers taken from active_tb_list) */
    /* Note that pending bytes here could reach a negative value if a partial
     * application buffer was posted and the doorbell hasn't been given yet, however,
     * once the doorbell is given for the application buffer the pending bytes will reach a non-negative
     * value (>=0) */
    tcp->rx_con->bytes_comp_cnt += copied_bytes;
    /* complete nbytes on buffers (dpc-flow ) */
    lm_tcp_complete_nbytes(pdev, tcp, tcp->rx_con, copied_bytes, /* push=*/ 0);

    DbgMessage(pdev, VERBOSEl4rx, "lm_tcp_rx_peninsula_to_rq copied %d bytes cid=%d\n", copied_bytes, tcp->cid);
    return copied_bytes;
}
763*d14abf15SRobert Mustacchi
764*d14abf15SRobert Mustacchi /** Description
765*d14abf15SRobert Mustacchi * determines whether or not we can indicate.
766*d14abf15SRobert Mustacchi * Rules:
767*d14abf15SRobert Mustacchi * - Indication is not blocked
768*d14abf15SRobert Mustacchi * - we are not in the middle of completion a split-buffer
769*d14abf15SRobert Mustacchi * we can only indicate after an entire buffer has been completed/copied to.
770*d14abf15SRobert Mustacchi * we determine this by the app_buf_bytes_acc_comp. This is to avoid the
771*d14abf15SRobert Mustacchi * following data integrity race:
772*d14abf15SRobert Mustacchi * application buffer: app_start, app_end
773*d14abf15SRobert Mustacchi * app_start is posted, peninsula copied to app_start, app_start completed to
774*d14abf15SRobert Mustacchi * fw then the rest is indicated. fw receives app_end, fw thinks peninsula was
775*d14abf15SRobert Mustacchi * copied to buffer, application buffer misses data...
776*d14abf15SRobert Mustacchi * - our active_tb_list is empty... we HAVE to make sure to
777*d14abf15SRobert Mustacchi * always indicate after we've fully utilized our RQ
778*d14abf15SRobert Mustacchi * buffers...
779*d14abf15SRobert Mustacchi */
_lm_tcp_ok_to_indicate(lm_tcp_con_t * rx_con)780*d14abf15SRobert Mustacchi static __inline u8_t _lm_tcp_ok_to_indicate(lm_tcp_con_t * rx_con)
781*d14abf15SRobert Mustacchi {
782*d14abf15SRobert Mustacchi return (!(rx_con->flags & TCP_RX_IND_BLOCKED) && (rx_con->app_buf_bytes_acc_comp == 0) &&
783*d14abf15SRobert Mustacchi (s_list_is_empty(&rx_con->active_tb_list)));
784*d14abf15SRobert Mustacchi }
785*d14abf15SRobert Mustacchi
786*d14abf15SRobert Mustacchi /** Description
787*d14abf15SRobert Mustacchi * GA: add a buffer to the peninsula - nbytes represents the number of bytes in the previous buffer.
788*d14abf15SRobert Mustacchi * GR: release a buffer from the peninsula - nbytes represents the number of bytes in the current buffer.
789*d14abf15SRobert Mustacchi * Assumption:
790*d14abf15SRobert Mustacchi * GR can only be called on a buffer that had been added using GA before
791*d14abf15SRobert Mustacchi */
void lm_tcp_rx_gen_peninsula_process(lm_device_t * pdev, lm_tcp_state_t * tcp, u32_t nbytes, lm_tcp_gen_buf_t * gen_buf)
{
    lm_tcp_con_t             * rx_con   = tcp->rx_con;
    lm_tcp_con_rx_gen_info_t * gen_info = &rx_con->u.rx.gen_info;

    DbgMessage(pdev, VERBOSEl4rx, "##lm_tcp_rx_gen_peninsula_process, nbytes=%d, cid=%d add=%s\n", nbytes, tcp->cid,
               (gen_buf)? "TRUE" : "FALSE");

    DbgBreakIf(rx_con->flags & TCP_RX_COMP_BLOCKED);

    /* nbytes finalizes the previous buffer (GA) or the current one (GR).
     * Always called from a DPC, so only the dpc_peninsula is touched. */
    if (nbytes) {
        lm_tcp_gen_buf_t * tail_buf;

        gen_info->dpc_peninsula_nbytes += nbytes;
        tail_buf = (lm_tcp_gen_buf_t *)d_list_peek_tail(&gen_info->dpc_peninsula_list);
        DbgBreakIfAll(tail_buf == NULL);
        DbgBreakIfAll(tail_buf->placed_bytes != 0);
        DbgBreakIfAll(nbytes > LM_TCP_GEN_BUF_SIZE(pdev));
        tail_buf->placed_bytes = (u16_t)nbytes;
    }

    /* GA: append the new buffer (its byte count arrives with the next CQE). */
    if (gen_buf != NULL) {
        DbgBreakIf(SIG(gen_buf->buf_virt) != L4GEN_BUFFER_SIG);
        DbgBreakIf(END_SIG(gen_buf->buf_virt, LM_TCP_GEN_BUF_SIZE(pdev)) != L4GEN_BUFFER_SIG_END);

        d_list_push_tail(&gen_info->dpc_peninsula_list, &gen_buf->link);
    }

}
824*d14abf15SRobert Mustacchi
/** Description
 *  GNI (new isle): creates a new isle containing 'gen_buf' and inserts it
 *  into the connection's ordered isle list at position 'isle_num'
 *  (1-based). The first isle is embedded in gen_info; additional isles
 *  come from the per-SB free list. Inserting at position 1 swaps the
 *  embedded first isle's contents into the newly allocated descriptor.
 *  Updates the current-isle cache and the per-SB isle count delta.
 *  NOTE(review): restored '&current_isle' where the source had been
 *  mangled to the '&curren;' HTML entity ("¤t_isle") - without the
 *  fix the code does not compile.
 */
void lm_tcp_rx_gen_isle_create(lm_device_t * pdev, lm_tcp_state_t * tcp, lm_tcp_gen_buf_t * gen_buf, u8_t sb_idx, u8_t isle_num)
{
    lm_isle_t                * current_isle = NULL;
    lm_isle_t                * next_isle = NULL;
    lm_tcp_con_t             * rx_con = tcp->rx_con;
    lm_tcp_con_rx_gen_info_t * gen_info;
    u8_t                       isles_cnt;
    d_list_entry_t           * isle_entry_prev = NULL;
    d_list_entry_t           * isle_entry_next = NULL;

    gen_info = &rx_con->u.rx.gen_info;
    isles_cnt = (u8_t)d_list_entry_cnt(&gen_info->isles_list);
    if (isles_cnt) {
        /* Not the first isle: take a descriptor from the SB free list. */
        DbgBreakIf(isles_cnt == T_TCP_MAX_ISLES_PER_CONNECTION_TOE);
        current_isle = _lm_tcp_isle_get_free_list(pdev, sb_idx);
        DbgBreakIf(!current_isle);
#ifdef DEBUG_OOO_CQE
        DbgBreakIf(current_isle->dedicated_cid != 0);
        current_isle->dedicated_cid = tcp->cid;
#endif
    } else {
        /* Very first isle lives inside gen_info itself. */
        current_isle = &gen_info->first_isle;
    }

    d_list_push_head(&current_isle->isle_gen_bufs_list_head, &gen_buf->link);
    current_isle->isle_nbytes = 0;
    if (isle_num == 1) {
        if (current_isle != &gen_info->first_isle) {
            /* New head isle: move the embedded first isle's contents into
             * the fresh descriptor and re-seed the embedded slot with the
             * new buffer; the old first isle is re-linked after it below. */
            *current_isle = gen_info->first_isle;
            d_list_init(&gen_info->first_isle.isle_gen_bufs_list_head, NULL, NULL, 0);
            d_list_push_head(&gen_info->first_isle.isle_gen_bufs_list_head, &gen_buf->link);
            gen_info->first_isle.isle_nbytes = 0;
            isle_entry_prev = &gen_info->first_isle.isle_link;
            isle_entry_next = gen_info->first_isle.isle_link.next;
        }
    } else if (isle_num <= isles_cnt) {
        /* Insert in the middle: link just before the isle currently
         * holding this position. */
        next_isle = _lm_tcp_isle_find(pdev,tcp,isle_num);
        isle_entry_prev = next_isle->isle_link.prev;
        isle_entry_next = &next_isle->isle_link;
    } else if (isle_num == (isles_cnt + 1)) {
        /* Append at the tail. */
        isle_entry_next = NULL;
        isle_entry_prev = gen_info->isles_list.tail;
    } else {
        /* Position beyond cnt+1 is a protocol violation. */
        DbgBreak();
    }

    d_list_insert_entry(&gen_info->isles_list, isle_entry_prev, isle_entry_next, &current_isle->isle_link);
    if (isle_num == 1) {
        /* Position 1 always ends up in the embedded first isle. */
        current_isle = &gen_info->first_isle;
    }
#ifdef DEBUG_OOO_CQE
    SET_DEBUG_OOO_INFO(current_isle, CMP_OPCODE_TOE_GNI, 0);
#endif
    /* Cache the isle so the following GAIR/GAIL CQEs find it quickly. */
    gen_info->current_isle = current_isle;
    gen_info->current_isle_number = isle_num;
    pdev->toe_info.grqs[sb_idx].number_of_isles_delta++;
    if (isles_cnt == gen_info->max_number_of_isles) {
        gen_info->max_number_of_isles++;
    }
}
885*d14abf15SRobert Mustacchi
/** Description
 *  GAIR (add right) / GRI (release): appends a generic buffer to the right
 *  edge of isle 'isle_num' (gen_buf != NULL), and/or finalizes the byte
 *  count of that isle's previous tail buffer (nbytes != 0).
 *  Enforces the configured per-isle buffer limit when adding.
 */
void lm_tcp_rx_gen_isle_right_process(lm_device_t * pdev, lm_tcp_state_t * tcp, u32_t nbytes, lm_tcp_gen_buf_t * gen_buf, u8_t sb_idx, u8_t isle_num)
{
    lm_tcp_con_t             * rx_con = tcp->rx_con;
    lm_tcp_con_rx_gen_info_t * gen_info;
    lm_tcp_gen_buf_t         * last_gen_buf;
    lm_isle_t                * requested_isle;

    DbgMessage(pdev, VERBOSEl4rx, "##lm_tcp_rx_gen_isle_process nbytes = %d cid=%d\n", nbytes, tcp->cid);

    gen_info = &rx_con->u.rx.gen_info;
    requested_isle = _lm_tcp_isle_find(pdev,tcp,isle_num);
    DbgBreakIf(!requested_isle);

    /* update the previous buffer (the isle's current tail) */
    last_gen_buf = (lm_tcp_gen_buf_t *)d_list_peek_tail(&requested_isle->isle_gen_bufs_list_head);
    DbgBreakIf(last_gen_buf == NULL);
    if (nbytes) {
        gen_info->isle_nbytes += nbytes;
        requested_isle->isle_nbytes += nbytes;
        DbgBreakIf(last_gen_buf->placed_bytes != 0);
        DbgBreakIf(nbytes > 0xffff);  /* placed_bytes is u16_t */
        last_gen_buf->placed_bytes = (u16_t)nbytes;
    } else {
        /* Release (GRI) with no byte count: the tail must already be
         * finalized and a buffer must accompany the CQE. */
        DbgBreakIf(gen_buf == NULL);
        DbgBreakIf(last_gen_buf->placed_bytes == 0);
    }

    if (gen_buf) {
        /* Validate the buffer's guard signatures before linking it in. */
        DbgBreakIf(SIG(gen_buf->buf_virt) != L4GEN_BUFFER_SIG);
        DbgBreakIf(END_SIG(gen_buf->buf_virt, LM_TCP_GEN_BUF_SIZE(pdev)) != L4GEN_BUFFER_SIG_END);

        d_list_push_tail(&requested_isle->isle_gen_bufs_list_head, &gen_buf->link);
        pdev->toe_info.grqs[sb_idx].gen_bufs_in_isles_delta++;
        /* Enforce the optional cap on buffers per isle. */
        if (pdev->params.l4_max_gen_bufs_in_isle
            && (d_list_entry_cnt(&requested_isle->isle_gen_bufs_list_head) > pdev->params.l4_max_gen_bufs_in_isle)) {
            if (pdev->params.l4_limit_isles & L4_LI_NOTIFY) {
                DbgBreak();
            }
            if (pdev->params.l4_limit_isles & L4_LI_MAX_GEN_BUFS_IN_ISLE) {
                rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_TOO_BIG_ISLE;
            }
        }
        /* NOTE: the else-branch below is only compiled under DEBUG_OOO_CQE;
         * without it, the debug-info call exists only for the add case. */
#ifdef DEBUG_OOO_CQE
        SET_DEBUG_OOO_INFO(requested_isle, CMP_OPCODE_TOE_GAIR, nbytes);
    } else {
        SET_DEBUG_OOO_INFO(requested_isle, CMP_OPCODE_TOE_GRI, nbytes);
#endif
    }
}
935*d14abf15SRobert Mustacchi
/** Description
 *  GAIL (add left): two-phase addition of a generic buffer to the LEFT
 *  edge of isle 'isle_num'. First call (nbytes == 0) pushes the buffer to
 *  the isle head and sets wait_for_isle_left; second call (nbytes != 0)
 *  finalizes that head buffer's byte count and clears the flag.
 */
void lm_tcp_rx_gen_isle_left_process(lm_device_t * pdev, lm_tcp_state_t * tcp, u32_t nbytes, lm_tcp_gen_buf_t * gen_buf, u8_t sb_idx, u8_t isle_num)
{
    lm_tcp_con_t             * rx_con = tcp->rx_con;
    lm_tcp_con_rx_gen_info_t * gen_info;
    lm_tcp_gen_buf_t         * last_gen_buf;
    lm_isle_t                * requested_isle;

    DbgMessage(pdev, VERBOSEl4rx, "##lm_tcp_rx_gen_isle_process nbytes = %d cid=%d\n", nbytes, tcp->cid);

    gen_info = &rx_con->u.rx.gen_info;
    requested_isle = _lm_tcp_isle_find(pdev,tcp,isle_num);
    DbgBreakIf(!requested_isle);

    if (nbytes) {
        /* Phase 2: byte count for the buffer pushed in phase 1. */
        DbgBreakIf(!gen_info->wait_for_isle_left);
        DbgBreakIf(gen_buf != NULL);
        gen_info->wait_for_isle_left = FALSE;
        gen_info->isle_nbytes += nbytes;
        requested_isle->isle_nbytes += nbytes;
#if defined(_NTDDK_)
#pragma prefast (push)
#pragma prefast (disable:28182) // If nbytes is larger that zero than ((returned_list_of_gen_bufs))->head is not NULL.
#endif //_NTDDK_
        last_gen_buf = (lm_tcp_gen_buf_t *)d_list_peek_head(&requested_isle->isle_gen_bufs_list_head);
        DbgBreakIf(last_gen_buf->placed_bytes);
        last_gen_buf->placed_bytes = (u16_t)nbytes;
#if defined(_NTDDK_)
#pragma prefast (pop)
#endif //_NTDDK_
    } else {
        /* Phase 1: push the new buffer to the isle's head; its byte count
         * arrives with the next GAIL CQE. */
        DbgBreakIf(gen_info->wait_for_isle_left);
        DbgBreakIf(gen_buf == NULL);
        DbgBreakIf(SIG(gen_buf->buf_virt) != L4GEN_BUFFER_SIG);
        DbgBreakIf(END_SIG(gen_buf->buf_virt, LM_TCP_GEN_BUF_SIZE(pdev)) != L4GEN_BUFFER_SIG_END);
        gen_info->wait_for_isle_left = TRUE;
        d_list_push_head(&requested_isle->isle_gen_bufs_list_head, &gen_buf->link);
        pdev->toe_info.grqs[sb_idx].gen_bufs_in_isles_delta++;
    }
#ifdef DEBUG_OOO_CQE
    SET_DEBUG_OOO_INFO(requested_isle, CMP_OPCODE_TOE_GAIL, nbytes);
#endif
}
978*d14abf15SRobert Mustacchi
/** Description
 *  Handles a fw CMP_OPCODE_TOE_GJ (generic-join) completion.
 *  Two cases, selected by isle_num:
 *   - isle_num == 0: the first isle has become in-order; its buffers and byte
 *     count are removed from the archipelago and appended to the connection's
 *     dpc peninsula.
 *   - isle_num != 0: isle (isle_num + 1) is merged into isle isle_num — its
 *     buffers are appended to the tail of the surviving (start) isle.
 *  In both cases the per-GRQ isle statistics are updated, and the
 *  TOO_BIG_ISLE / TOO_MANY_ISLES dpc flags are cleared since the join has
 *  reduced the archipelago.
 */
void lm_tcp_rx_gen_join_process(lm_device_t * pdev, lm_tcp_state_t * tcp, u8_t sb_idx, u8_t isle_num)
{
    lm_tcp_con_t             * rx_con = tcp->rx_con;
    lm_tcp_con_rx_gen_info_t * gen_info;
    lm_isle_t                * start_isle;
    d_list_t                   gen_buf_list;
    u32_t                      isle_nbytes;
    DbgMessage(pdev, VERBOSEl4rx, "##lm_tcp_rx_gen_join_process cid=%d\n", tcp->cid);

    gen_info = &rx_con->u.rx.gen_info;


    if (!isle_num) {
        /* break if peninsula list isn't empty and the last buffer in list isn't released yet */
        DbgBreakIf(d_list_entry_cnt(&gen_info->dpc_peninsula_list) &&
                   ((lm_tcp_gen_buf_t *)(d_list_peek_tail(&gen_info->dpc_peninsula_list)))->placed_bytes == 0);
        d_list_init(&gen_buf_list, NULL, NULL, 0);
        /* detach the first isle (index 1) entirely */
        isle_nbytes = _lm_tcp_isle_remove(pdev, tcp, sb_idx, 1, &gen_buf_list);
//        DbgBreakIf(!(isle_nbytes && d_list_entry_cnt(&gen_buf_list)));
        if (d_list_entry_cnt(&gen_buf_list) > 1) {
            DbgBreakIf(((lm_tcp_gen_buf_t *)(d_list_peek_head(&gen_buf_list)))->placed_bytes == 0);
        }
        /* buffers leave the isles; one fewer isle exists on this GRQ */
        pdev->toe_info.grqs[sb_idx].gen_bufs_in_isles_delta -= (s32_t)d_list_entry_cnt(&gen_buf_list);
        pdev->toe_info.grqs[sb_idx].number_of_isles_delta--;

        if (!d_list_is_empty(&gen_buf_list)) {
            d_list_add_tail(&gen_info->dpc_peninsula_list, &gen_buf_list);
        }
        /* bytes move from isle accounting to dpc peninsula accounting */
        gen_info->dpc_peninsula_nbytes += isle_nbytes;
        gen_info->isle_nbytes -= isle_nbytes;
    } else {
        /* merge isle (isle_num + 1) into the surviving isle isle_num */
        start_isle = _lm_tcp_isle_find(pdev,tcp,isle_num);
        d_list_init(&gen_buf_list, NULL, NULL, 0);
        isle_nbytes = _lm_tcp_isle_remove(pdev, tcp, sb_idx, isle_num + 1, &gen_buf_list);
//        DbgBreakIf(!(isle_nbytes && d_list_entry_cnt(&gen_buf_list)));
        pdev->toe_info.grqs[sb_idx].number_of_isles_delta--;
        if (d_list_entry_cnt(&gen_buf_list) > 1) {
            DbgBreakIf(((lm_tcp_gen_buf_t *)(d_list_peek_head(&gen_buf_list)))->placed_bytes == 0);
        }
        /* the surviving isle's tail buffer must already be fully placed */
        DbgBreakIf(((lm_tcp_gen_buf_t *)(d_list_peek_tail(&start_isle->isle_gen_bufs_list_head)))->placed_bytes == 0);
        if (!d_list_is_empty(&gen_buf_list)) {
            d_list_add_tail(&start_isle->isle_gen_bufs_list_head, &gen_buf_list);
        }
        start_isle->isle_nbytes += isle_nbytes;
#ifdef DEBUG_OOO_CQE
        SET_DEBUG_OOO_INFO(start_isle,CMP_OPCODE_TOE_GJ,0);
#endif
    }
    /* the join shrank the archipelago — the too-big / too-many conditions
     * no longer hold */
    rx_con->dpc_info.dpc_flags &= ~(LM_TCP_DPC_TOO_BIG_ISLE | LM_TCP_DPC_TOO_MANY_ISLES);

}
1030*d14abf15SRobert Mustacchi
lm_tcp_rx_next_grq_buf(lm_device_t * pdev,u8_t sb_idx)1031*d14abf15SRobert Mustacchi static __inline lm_tcp_gen_buf_t * lm_tcp_rx_next_grq_buf(lm_device_t * pdev, u8_t sb_idx)
1032*d14abf15SRobert Mustacchi {
1033*d14abf15SRobert Mustacchi lm_tcp_gen_buf_t * gen_buf;
1034*d14abf15SRobert Mustacchi
1035*d14abf15SRobert Mustacchi /* 11/12/2008 - TODO: Enhance locking acquisition method,
1036*d14abf15SRobert Mustacchi * TBD: aggragate cons, and active_gen_list updates */
1037*d14abf15SRobert Mustacchi MM_ACQUIRE_TOE_GRQ_LOCK_DPC(pdev, sb_idx);
1038*d14abf15SRobert Mustacchi
1039*d14abf15SRobert Mustacchi /* Get the generic buffer for this completion */
1040*d14abf15SRobert Mustacchi gen_buf = (lm_tcp_gen_buf_t *)d_list_pop_head(&pdev->toe_info.grqs[sb_idx].active_gen_list);
1041*d14abf15SRobert Mustacchi if (ERR_IF(gen_buf == NULL)) {
1042*d14abf15SRobert Mustacchi DbgBreakMsg("Received a fw GA/GAI without any generic buffers\n");
1043*d14abf15SRobert Mustacchi return NULL;
1044*d14abf15SRobert Mustacchi }
1045*d14abf15SRobert Mustacchi DbgBreakIf(!gen_buf);
1046*d14abf15SRobert Mustacchi DbgBreakIf(SIG(gen_buf->buf_virt) != L4GEN_BUFFER_SIG);
1047*d14abf15SRobert Mustacchi DbgBreakIf(END_SIG(gen_buf->buf_virt, LM_TCP_GEN_BUF_SIZE(pdev)) != L4GEN_BUFFER_SIG_END);
1048*d14abf15SRobert Mustacchi
1049*d14abf15SRobert Mustacchi /* each generic buffer is represented by ONE bd on the bd-chain */
1050*d14abf15SRobert Mustacchi lm_bd_chain_bds_consumed(&pdev->toe_info.grqs[sb_idx].bd_chain, 1);
1051*d14abf15SRobert Mustacchi
1052*d14abf15SRobert Mustacchi MM_RELEASE_TOE_GRQ_LOCK_DPC(pdev, sb_idx);
1053*d14abf15SRobert Mustacchi
1054*d14abf15SRobert Mustacchi return gen_buf;
1055*d14abf15SRobert Mustacchi }
1056*d14abf15SRobert Mustacchi
/** Description
 *  completes the fast-path operations for a certain connection:
 *  - completes outstanding skp bytes when completions are blocked (no more
 *    RQ_SKP will arrive from fw),
 *  - copies / publishes the dpc peninsula to the lock-protected peninsula,
 *  - snapshots and clears the dpc flags,
 *  - completes RQ buffers to the client, indicates generic data, and posts
 *    the aggregated sws (window) update.
 * Assumption:
 *  fp-rx lock is taken
 *  This function is mutual exclusive: there can only be one thread running it at a time.
 */
void lm_tcp_rx_complete_tcp_fp(lm_device_t * pdev, lm_tcp_state_t * tcp, lm_tcp_con_t * con)
{
    lm_tcp_buffer_t * curr_tcp_buf;
    u32_t add_sws_bytes = 0;

    if (con->dpc_info.dpc_comp_blocked) {
        /* we will no longer receive a "skp" */
        SET_FLAGS(con->flags, TCP_POST_NO_SKP); /* so that new posts complete immediately... */
        /* complete any outstanding skp bytes... */
        if (tcp->rx_con->u.rx.skp_bytes_copied) {
            /* now we can complete these bytes that have already been copied... */
            tcp->rx_con->bytes_comp_cnt += tcp->rx_con->u.rx.skp_bytes_copied;
            /* complete nbytes on buffers (dpc-flow ) */
            lm_tcp_complete_nbytes(pdev, tcp, tcp->rx_con, tcp->rx_con->u.rx.skp_bytes_copied, /* push=*/ 0);
            tcp->rx_con->u.rx.skp_bytes_copied = 0;
        }
    }

    /* TBA Michals FW BYPASS...copy here */
    if (!d_list_is_empty(&con->u.rx.gen_info.dpc_peninsula_list)) {
        /* only copy if this is the end... otherwise, we will wait for that SKP... */
        if (lm_tcp_next_entry_dpc_active_list(con) && con->u.rx.gen_info.dpc_peninsula_nbytes && con->dpc_info.dpc_comp_blocked) {
            /* couldn't have been posted buffers if peninsula exists... */
            DbgBreakIf(!d_list_is_empty(&con->u.rx.gen_info.peninsula_list));
            /* copy everything (0xffffffff) from the peninsula to posted RQ buffers */
            con->u.rx.gen_info.bytes_copied_cnt_in_comp += lm_tcp_rx_peninsula_to_rq(pdev, tcp, 0xffffffff,NON_EXISTENT_SB_IDX);
        }

        /* check if we still have something in the peninsula after the copying AND our active tb list is empty... otherwise, it's intended
         * for that and we'll wait for the next RQ_SKP in the next DPC. UNLESS, we've got completion block, in which case RQ_SKP won't make it
         * way ever... */
        curr_tcp_buf = lm_tcp_next_entry_dpc_active_list(con);
        DbgBreakIf(!d_list_is_empty(&con->u.rx.gen_info.dpc_peninsula_list) && curr_tcp_buf && con->dpc_info.dpc_comp_blocked);
        if (!d_list_is_empty(&con->u.rx.gen_info.dpc_peninsula_list) && !curr_tcp_buf) {
            /* publish the dpc peninsula (lockless, dpc-only) into the
             * peninsula list that the indication path reads under lock */
            d_list_add_tail(&con->u.rx.gen_info.peninsula_list, &con->u.rx.gen_info.dpc_peninsula_list);
            con->u.rx.gen_info.peninsula_nbytes += con->u.rx.gen_info.dpc_peninsula_nbytes;
            con->u.rx.gen_info.dpc_peninsula_nbytes = 0;

            /* we want to leave any non-released buffer in the dpc_peninsula (so that we don't access the list w/o a lock) */
            if (((lm_tcp_gen_buf_t *)d_list_peek_tail(&con->u.rx.gen_info.peninsula_list))->placed_bytes == 0) {
                lm_tcp_gen_buf_t * gen_buf;
                gen_buf = (lm_tcp_gen_buf_t *)d_list_pop_tail(&con->u.rx.gen_info.peninsula_list);
                if CHK_NULL(gen_buf)
                {
                    DbgBreakIfAll( !gen_buf ) ;
                    return;
                }
                /* re-seed the dpc peninsula with the single unreleased buffer */
                d_list_init(&con->u.rx.gen_info.dpc_peninsula_list, &gen_buf->link, &gen_buf->link, 1);
            } else {
                d_list_clear(&con->u.rx.gen_info.dpc_peninsula_list);
            }

        }
    }

    /**** Client completing :  may result in lock-release *****/
    /* during lock-release, due to this function being called from service_deferred, more
     * cqes can be processed. We don't want to mix. This function is mutually exclusive, so
     * any processing makes it's way to being completed by calling this function.
     * the following define a "fast-path completion"
     * (i)   RQ buffers to be completed
     *       defined by dpc_completed_tail and are collected during lm_tcp_complete_bufs BEFORE lock
     *       is released, so no more buffer processing can make it's way into this buffer completion.
     * (ii)  GRQ buffers to be indicated
     *       Are taken from peninsula, and not dpc_peninsula, so no NEW generic buffers can make their
     *       way to this indication
     * (iii) Fin to be indicated
     *       determined by the flags, since dpc_flags CAN be modified during processing we copy
     *       them to a snapshot_flags parameter, which is initialized in this function only, so no fin
     *       can can make its way in while we release the lock.
     * (iv)  Remainders for sp
     *       all sp operations are logged in dpc_flags. for the same reason as (iii) no sp commands can
     *       make their way in during this fp-completion, all sp-processing after will relate to this point in time.
     */
    /* NDC is the only fp flag: determining that we should complete all the processed cqes. Therefore, we can
     * turn it off here. We should turn it off, since if no sp flags are on, the sp-complete function shouldn't be called
     */
    // RESET_FLAGS(con->dpc_info.dpc_flags, LM_TCP_DPC_NDC);
    con->dpc_info.snapshot_flags = con->dpc_info.dpc_flags;
    con->dpc_info.dpc_flags = 0;

    /* compensate fw-window with the rq-placed bytes */
    if (con->dpc_info.dpc_rq_placed_bytes) {
        add_sws_bytes += con->dpc_info.dpc_rq_placed_bytes;
        con->dpc_info.dpc_rq_placed_bytes = 0;
    }


    /* check if we completed a buffer that as a result unblocks the um from posting more (a split buffer that
     * was placed on the last bd). If this occured - we should not have any other RQs!!! */
    if (con->dpc_info.dpc_unblock_post) {
        RESET_FLAGS(con->flags, TCP_POST_DELAYED);
        con->dpc_info.dpc_unblock_post = 0;
    }

    /* NOTE: AFTER THIS STAGE DO NOT ACCESS DPC-INFO ANYMORE - for deferred cqes issue */

    /* complete buffers to client */
    if (con->dpc_info.dpc_completed_tail != NULL) {
        lm_tcp_complete_bufs(pdev,tcp,con);
    }

    /* Is there something left to indicate? */
    if (!d_list_is_empty(&con->u.rx.gen_info.peninsula_list) && _lm_tcp_ok_to_indicate(con)) {
        mm_tcp_rx_indicate_gen(pdev,tcp);
        add_sws_bytes += tcp->rx_con->u.rx.gen_info.add_sws_bytes; /* any bytes we need to update will be aggregated here during indicate */
        tcp->rx_con->u.rx.gen_info.add_sws_bytes = 0;
    }

    if (add_sws_bytes) {
        /* single aggregated window-update doorbell for this dpc */
        lm_tcp_rx_post_sws(pdev, tcp, con, add_sws_bytes, TCP_RX_POST_SWS_INC);
    }

}
1176*d14abf15SRobert Mustacchi
1177*d14abf15SRobert Mustacchi
1178*d14abf15SRobert Mustacchi /** Description
1179*d14abf15SRobert Mustacchi * processes a single cqe.
1180*d14abf15SRobert Mustacchi */
lm_tcp_rx_process_cqe(lm_device_t * pdev,struct toe_rx_cqe * cqe,lm_tcp_state_t * tcp,u8_t sb_idx)1181*d14abf15SRobert Mustacchi void lm_tcp_rx_process_cqe(
1182*d14abf15SRobert Mustacchi lm_device_t * pdev,
1183*d14abf15SRobert Mustacchi struct toe_rx_cqe * cqe,
1184*d14abf15SRobert Mustacchi lm_tcp_state_t * tcp,
1185*d14abf15SRobert Mustacchi u8_t sb_idx)
1186*d14abf15SRobert Mustacchi {
1187*d14abf15SRobert Mustacchi u32_t nbytes;
1188*d14abf15SRobert Mustacchi u8_t cmd;
1189*d14abf15SRobert Mustacchi u8_t isle_num = 0;
1190*d14abf15SRobert Mustacchi
1191*d14abf15SRobert Mustacchi cmd = ((cqe->params1 & TOE_RX_CQE_COMPLETION_OPCODE) >> TOE_RX_CQE_COMPLETION_OPCODE_SHIFT);
1192*d14abf15SRobert Mustacchi
1193*d14abf15SRobert Mustacchi
1194*d14abf15SRobert Mustacchi /* Check that the cqe nbytes make sense, we could have got here by chance... */
1195*d14abf15SRobert Mustacchi /* update completion has a different usage for nbyts which is a sequence -so any number is valid*/
1196*d14abf15SRobert Mustacchi if(IS_OOO_CQE(cmd)) {
1197*d14abf15SRobert Mustacchi nbytes = (cqe->data.ooo_params.ooo_params & TOE_RX_CQE_OOO_PARAMS_NBYTES) >> TOE_RX_CQE_OOO_PARAMS_NBYTES_SHIFT;
1198*d14abf15SRobert Mustacchi isle_num = (cqe->data.ooo_params.ooo_params & TOE_RX_CQE_OOO_PARAMS_ISLE_NUM) >> TOE_RX_CQE_OOO_PARAMS_ISLE_NUM_SHIFT;
1199*d14abf15SRobert Mustacchi if (((isle_num == 0) && (cmd != CMP_OPCODE_TOE_GJ)) || (isle_num > T_TCP_MAX_ISLES_PER_CONNECTION_TOE)) {
1200*d14abf15SRobert Mustacchi DbgMessage(pdev, FATAL, "Isle number %d is not valid for OOO CQE %d\n", isle_num, cmd);
1201*d14abf15SRobert Mustacchi DbgBreak();
1202*d14abf15SRobert Mustacchi }
1203*d14abf15SRobert Mustacchi } else if (cmd == RAMROD_OPCODE_TOE_UPDATE) {
1204*d14abf15SRobert Mustacchi nbytes = cqe->data.raw_data;
1205*d14abf15SRobert Mustacchi } else {
1206*d14abf15SRobert Mustacchi nbytes = (cqe->data.in_order_params.in_order_params & TOE_RX_CQE_IN_ORDER_PARAMS_NBYTES) >> TOE_RX_CQE_IN_ORDER_PARAMS_NBYTES_SHIFT;
1207*d14abf15SRobert Mustacchi DbgBreakIfAll(nbytes & 0xc0000000); /* two upper bits on show a completion larger than 1GB - a bit odd...*/
1208*d14abf15SRobert Mustacchi DbgBreakIf(nbytes && tcp->rx_con->dpc_info.dpc_comp_blocked);
1209*d14abf15SRobert Mustacchi }
1210*d14abf15SRobert Mustacchi if (pdev->toe_info.archipelago.l4_decrease_archipelago
1211*d14abf15SRobert Mustacchi && d_list_entry_cnt(&tcp->rx_con->u.rx.gen_info.first_isle.isle_gen_bufs_list_head)) {
1212*d14abf15SRobert Mustacchi tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_TOO_MANY_ISLES;
1213*d14abf15SRobert Mustacchi }
1214*d14abf15SRobert Mustacchi switch(cmd)
1215*d14abf15SRobert Mustacchi {
1216*d14abf15SRobert Mustacchi case CMP_OPCODE_TOE_SRC_ERR:
1217*d14abf15SRobert Mustacchi DbgMessage(pdev, FATAL, "ERROR: NO SEARCHER ENTRY!\n");
1218*d14abf15SRobert Mustacchi DbgBreakIfAll(TRUE);
1219*d14abf15SRobert Mustacchi return;
1220*d14abf15SRobert Mustacchi case CMP_OPCODE_TOE_GA:
1221*d14abf15SRobert Mustacchi //DbgMessage(pdev, WARN, "GenericAdd cid=%d nbytes=%d!\n", tcp->cid, cqe->nbytes);
1222*d14abf15SRobert Mustacchi lm_tcp_rx_gen_peninsula_process(pdev, tcp, nbytes,
1223*d14abf15SRobert Mustacchi lm_tcp_rx_next_grq_buf(pdev, sb_idx));
1224*d14abf15SRobert Mustacchi return;
1225*d14abf15SRobert Mustacchi case CMP_OPCODE_TOE_GNI:
1226*d14abf15SRobert Mustacchi //DbgMessage(pdev, WARN, "GenericCreateIsle cid=%d isle_num=%d!\n", tcp->cid, isle_num);
1227*d14abf15SRobert Mustacchi DbgBreakIf(nbytes);
1228*d14abf15SRobert Mustacchi lm_tcp_rx_gen_isle_create(pdev, tcp,
1229*d14abf15SRobert Mustacchi lm_tcp_rx_next_grq_buf(pdev, sb_idx), sb_idx, isle_num);
1230*d14abf15SRobert Mustacchi return;
1231*d14abf15SRobert Mustacchi case CMP_OPCODE_TOE_GAIR:
1232*d14abf15SRobert Mustacchi //DbgMessage(pdev, WARN, "GenericAddIsleR cid=%d isle_num=%d nbytes=%d!\n", tcp->cid, isle_num, nbytes);
1233*d14abf15SRobert Mustacchi lm_tcp_rx_gen_isle_right_process(pdev, tcp, nbytes,
1234*d14abf15SRobert Mustacchi lm_tcp_rx_next_grq_buf(pdev, sb_idx), sb_idx, isle_num);
1235*d14abf15SRobert Mustacchi return;
1236*d14abf15SRobert Mustacchi case CMP_OPCODE_TOE_GAIL:
1237*d14abf15SRobert Mustacchi DbgMessage(pdev, WARN, "GenericAddIsleL cid=%d isle_num=%d nbytes=%d!\n", tcp->cid, isle_num, nbytes);
1238*d14abf15SRobert Mustacchi if (nbytes)
1239*d14abf15SRobert Mustacchi {
1240*d14abf15SRobert Mustacchi lm_tcp_rx_gen_isle_left_process(pdev, tcp, nbytes,
1241*d14abf15SRobert Mustacchi NULL, sb_idx, isle_num);
1242*d14abf15SRobert Mustacchi }
1243*d14abf15SRobert Mustacchi else
1244*d14abf15SRobert Mustacchi {
1245*d14abf15SRobert Mustacchi lm_tcp_rx_gen_isle_left_process(pdev, tcp, 0,
1246*d14abf15SRobert Mustacchi lm_tcp_rx_next_grq_buf(pdev, sb_idx), sb_idx, isle_num);
1247*d14abf15SRobert Mustacchi }
1248*d14abf15SRobert Mustacchi return;
1249*d14abf15SRobert Mustacchi case CMP_OPCODE_TOE_GRI:
1250*d14abf15SRobert Mustacchi // DbgMessage(pdev, WARN, "GenericReleaseIsle cid=%d isle_num=%d nbytes=%d!\n", tcp->cid, isle_num, nbytes);
1251*d14abf15SRobert Mustacchi lm_tcp_rx_gen_isle_right_process(pdev, tcp, nbytes, NULL, sb_idx, isle_num);
1252*d14abf15SRobert Mustacchi return;
1253*d14abf15SRobert Mustacchi case CMP_OPCODE_TOE_GR:
1254*d14abf15SRobert Mustacchi //DbgMessage(pdev, WARN, "GenericRelease cid=%d nbytes=%d!\n", tcp->cid, cqe->nbytes);
1255*d14abf15SRobert Mustacchi lm_tcp_rx_gen_peninsula_process(pdev, tcp, nbytes, NULL);
1256*d14abf15SRobert Mustacchi return;
1257*d14abf15SRobert Mustacchi case CMP_OPCODE_TOE_GJ:
1258*d14abf15SRobert Mustacchi //DbgMessage(pdev, WARN, "GenericJoin cid=%d nbytes=%d!\n", tcp->cid, cqe->nbytes);
1259*d14abf15SRobert Mustacchi lm_tcp_rx_gen_join_process(pdev, tcp, sb_idx, isle_num);
1260*d14abf15SRobert Mustacchi return;
1261*d14abf15SRobert Mustacchi case CMP_OPCODE_TOE_CMP:
1262*d14abf15SRobert Mustacchi //DbgMessage(pdev, WARN, "Cmp(push) cid=%d nbytes=%d!\n", tcp->cid, cqe->nbytes);
1263*d14abf15SRobert Mustacchi /* Add fast path handler here */
1264*d14abf15SRobert Mustacchi lm_tcp_rx_cmp_process(pdev, tcp, nbytes, 1);
1265*d14abf15SRobert Mustacchi return;
1266*d14abf15SRobert Mustacchi case CMP_OPCODE_TOE_REL:
1267*d14abf15SRobert Mustacchi //DbgMessage(pdev, WARN, "Rel(nopush) cid=%d nbytes=%d!\n", tcp->cid, cqe->nbytes);
1268*d14abf15SRobert Mustacchi lm_tcp_rx_cmp_process(pdev, tcp, nbytes, 0);
1269*d14abf15SRobert Mustacchi return;
1270*d14abf15SRobert Mustacchi case CMP_OPCODE_TOE_SKP:
1271*d14abf15SRobert Mustacchi //DbgMessage(pdev, WARN, "Skp cid=%d nbytes=%d!\n", tcp->cid, cqe->nbytes);
1272*d14abf15SRobert Mustacchi lm_tcp_rx_skp_process(pdev, tcp, nbytes, sb_idx);
1273*d14abf15SRobert Mustacchi return;
1274*d14abf15SRobert Mustacchi case CMP_OPCODE_TOE_DGI:
1275*d14abf15SRobert Mustacchi DbgMessage(pdev, WARN, "Delete Isle cid=%d!\n", tcp->cid);
1276*d14abf15SRobert Mustacchi lm_tcp_rx_delete_isle(pdev, tcp, sb_idx, isle_num, nbytes);
1277*d14abf15SRobert Mustacchi return;
1278*d14abf15SRobert Mustacchi }
1279*d14abf15SRobert Mustacchi
1280*d14abf15SRobert Mustacchi /* for the rest of the commands, if we have nbytes, we need to complete them (generic/app) */
1281*d14abf15SRobert Mustacchi /* unless it's an update completion, in which case the nbytes has a different meaning. */
1282*d14abf15SRobert Mustacchi if ((cmd != RAMROD_OPCODE_TOE_UPDATE) && nbytes) {
1283*d14abf15SRobert Mustacchi lm_tcp_gen_buf_t * gen_buf;
1284*d14abf15SRobert Mustacchi gen_buf = (lm_tcp_gen_buf_t *)d_list_peek_tail(&tcp->rx_con->u.rx.gen_info.dpc_peninsula_list);
1285*d14abf15SRobert Mustacchi if(gen_buf && (gen_buf->placed_bytes == 0)) {
1286*d14abf15SRobert Mustacchi lm_tcp_rx_gen_peninsula_process(pdev, tcp, nbytes, NULL);
1287*d14abf15SRobert Mustacchi } else {
1288*d14abf15SRobert Mustacchi /* if we're here - we will no longer see a RQ_SKP so, let's simulate one...note if we didn't get nbytes here.. we still need
1289*d14abf15SRobert Mustacchi * to take care of this later if it's a blocking completion the skip will have to be everything in the peninsula
1290*d14abf15SRobert Mustacchi * we can access skp_bytes here lockless, because the only time it will be accessed in post is if there is something in the peninsula, if we got a RQ_SKP here, there can't be...*/
1291*d14abf15SRobert Mustacchi DbgBreakIf(!d_list_is_empty(&tcp->rx_con->u.rx.gen_info.peninsula_list));
1292*d14abf15SRobert Mustacchi DbgBreakIf(tcp->rx_con->rq_nbytes <= tcp->rx_con->u.rx.gen_info.dpc_peninsula_nbytes+tcp->rx_con->u.rx.skp_bytes_copied); // we got a RQ completion here... so peninsula CAN;T cover RQ!!!
1293*d14abf15SRobert Mustacchi lm_tcp_rx_skp_process(pdev, tcp, tcp->rx_con->u.rx.gen_info.dpc_peninsula_nbytes+tcp->rx_con->u.rx.skp_bytes_copied, sb_idx);
1294*d14abf15SRobert Mustacchi
1295*d14abf15SRobert Mustacchi /* We give push=1 here, this will seperate between 'received' data and 'aborted' bufs. we won't
1296*d14abf15SRobert Mustacchi * have any buffers left that need to be aborted that have partial completed data on them */
1297*d14abf15SRobert Mustacchi lm_tcp_rx_cmp_process(pdev, tcp, nbytes, 2 /* push as result of sp-completion*/);
1298*d14abf15SRobert Mustacchi }
1299*d14abf15SRobert Mustacchi }
1300*d14abf15SRobert Mustacchi
1301*d14abf15SRobert Mustacchi switch (cmd) {
1302*d14abf15SRobert Mustacchi case CMP_OPCODE_TOE_FIN_RCV:
1303*d14abf15SRobert Mustacchi tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_FIN_RECV;
1304*d14abf15SRobert Mustacchi tcp->rx_con->dpc_info.dpc_comp_blocked = TRUE; /* TCP_REMOTE_FIN_RECEIVED */
1305*d14abf15SRobert Mustacchi return;
1306*d14abf15SRobert Mustacchi case CMP_OPCODE_TOE_FIN_UPL:
1307*d14abf15SRobert Mustacchi tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_FIN_RECV_UPL;
1308*d14abf15SRobert Mustacchi tcp->rx_con->dpc_info.dpc_comp_blocked = TRUE; /* TCP_REMOTE_FIN_RECEIVED + Request to upload the connection */
1309*d14abf15SRobert Mustacchi return;
1310*d14abf15SRobert Mustacchi case CMP_OPCODE_TOE_RST_RCV:
1311*d14abf15SRobert Mustacchi tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_RESET_RECV;
1312*d14abf15SRobert Mustacchi tcp->rx_con->dpc_info.dpc_comp_blocked = TRUE; /* TCP_REMOTE_RST_RECEIVED */
1313*d14abf15SRobert Mustacchi return;
1314*d14abf15SRobert Mustacchi case RAMROD_OPCODE_TOE_UPDATE:
1315*d14abf15SRobert Mustacchi DbgBreakIf( (tcp->hdr.status != STATE_STATUS_NORMAL) && (tcp->hdr.status != STATE_STATUS_ABORTED));
1316*d14abf15SRobert Mustacchi DbgBreakIf(tcp->sp_request == NULL);
1317*d14abf15SRobert Mustacchi DbgBreakIf((tcp->sp_request->type != SP_REQUEST_UPDATE_NEIGH) &&
1318*d14abf15SRobert Mustacchi (tcp->sp_request->type != SP_REQUEST_UPDATE_PATH) &&
1319*d14abf15SRobert Mustacchi (tcp->sp_request->type != SP_REQUEST_UPDATE_TCP) &&
1320*d14abf15SRobert Mustacchi (tcp->sp_request->type != SP_REQUEST_UPDATE_PATH_RELINK));
1321*d14abf15SRobert Mustacchi tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_RAMROD_CMP;
1322*d14abf15SRobert Mustacchi
1323*d14abf15SRobert Mustacchi /*DbgMessage(pdev, FATAL, "lm_tcp_rx_process_cqe() RAMROD_OPCODE_TOE_UPDATE: IGNORE_WND_UPDATES=%d, cqe->nbytes=%d\n", GET_FLAGS(tcp->rx_con->db_data.rx->flags, TOE_RX_DB_DATA_IGNORE_WND_UPDATES), cqe->nbytes);*/
1324*d14abf15SRobert Mustacchi
1325*d14abf15SRobert Mustacchi if ((tcp->sp_request->type == SP_REQUEST_UPDATE_TCP) && (GET_FLAGS(tcp->rx_con->db_data.rx->flags, TOE_RX_DB_DATA_IGNORE_WND_UPDATES)))
1326*d14abf15SRobert Mustacchi {
1327*d14abf15SRobert Mustacchi tcp->rx_con->dpc_info.dpc_fw_wnd_after_dec = nbytes;
1328*d14abf15SRobert Mustacchi }
1329*d14abf15SRobert Mustacchi return;
1330*d14abf15SRobert Mustacchi case CMP_OPCODE_TOE_URG:
1331*d14abf15SRobert Mustacchi tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_URG;
1332*d14abf15SRobert Mustacchi return;
1333*d14abf15SRobert Mustacchi case CMP_OPCODE_TOE_MAX_RT:
1334*d14abf15SRobert Mustacchi DbgMessage(pdev, WARNl4, "lm_tcp_rx_process_cqe: CMP_OPCODE_TOE_MAX_RT cid=%d\n", tcp->cid);
1335*d14abf15SRobert Mustacchi tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_RT_TO;
1336*d14abf15SRobert Mustacchi return;
1337*d14abf15SRobert Mustacchi case CMP_OPCODE_TOE_RT_TO:
1338*d14abf15SRobert Mustacchi DbgMessage(pdev, WARNl4, "lm_tcp_rx_process_cqe: CMP_OPCODE_TOE_RT_TO cid=%d\n", tcp->cid);
1339*d14abf15SRobert Mustacchi tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_RT_TO;
1340*d14abf15SRobert Mustacchi return;
1341*d14abf15SRobert Mustacchi case CMP_OPCODE_TOE_KA_TO:
1342*d14abf15SRobert Mustacchi tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_KA_TO;
1343*d14abf15SRobert Mustacchi return;
1344*d14abf15SRobert Mustacchi case CMP_OPCODE_TOE_DBT_RE:
1345*d14abf15SRobert Mustacchi /* LH Inbox specification: Black Hole detection (RFC 2923)
1346*d14abf15SRobert Mustacchi * TCP Chimney target MUST upload the connection if the TCPDoubtReachabilityRetransmissions threshold is hit.
1347*d14abf15SRobert Mustacchi * SPARTA test scripts and tests that will fail if not implemented: All tests in Tcp_BlackholeDetection.wsf, we cause
1348*d14abf15SRobert Mustacchi * the upload by giving L4_UPLOAD_REASON_UPLOAD_REQUEST (same as Teton) */
1349*d14abf15SRobert Mustacchi DbgMessage(pdev, INFORMl4, "lm_tcp_rx_process_cqe: RCQE CMP_OPCODE_TOE_DBT_RE, cid=%d\n", tcp->cid);
1350*d14abf15SRobert Mustacchi DbgMessage(pdev, WARNl4, "lm_tcp_rx_process_cqe: RCQE CMP_OPCODE_TOE_DBT_RE, cid=%d IGNORING!!!\n", tcp->cid);
1351*d14abf15SRobert Mustacchi /* We add this here only for windows and not ediag */
1352*d14abf15SRobert Mustacchi #if (!defined(DOS)) && (!defined(__LINUX))
1353*d14abf15SRobert Mustacchi tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_DBT_RE;
1354*d14abf15SRobert Mustacchi #endif
1355*d14abf15SRobert Mustacchi return;
1356*d14abf15SRobert Mustacchi case CMP_OPCODE_TOE_SYN:
1357*d14abf15SRobert Mustacchi case CMP_OPCODE_TOE_FW2_TO:
1358*d14abf15SRobert Mustacchi tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_UPLD_CLOSE;
1359*d14abf15SRobert Mustacchi return;
1360*d14abf15SRobert Mustacchi case CMP_OPCODE_TOE_2WY_CLS:
1361*d14abf15SRobert Mustacchi tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_UPLD_CLOSE;
1362*d14abf15SRobert Mustacchi return;
1363*d14abf15SRobert Mustacchi case CMP_OPCODE_TOE_OPT_ERR:
1364*d14abf15SRobert Mustacchi tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_OPT_ERR;
1365*d14abf15SRobert Mustacchi return;
1366*d14abf15SRobert Mustacchi case RAMROD_OPCODE_TOE_QUERY:
1367*d14abf15SRobert Mustacchi DbgBreakIf(! tcp->sp_request );
1368*d14abf15SRobert Mustacchi DbgBreakIf(tcp->sp_request->type != SP_REQUEST_QUERY);
1369*d14abf15SRobert Mustacchi tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_RAMROD_CMP;
1370*d14abf15SRobert Mustacchi return;
1371*d14abf15SRobert Mustacchi case RAMROD_OPCODE_TOE_SEARCHER_DELETE:
1372*d14abf15SRobert Mustacchi DbgBreakIf(tcp->sp_request->type != SP_REQUEST_TERMINATE_OFFLOAD);
1373*d14abf15SRobert Mustacchi tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_RAMROD_CMP;
1374*d14abf15SRobert Mustacchi return;
1375*d14abf15SRobert Mustacchi case RAMROD_OPCODE_TOE_RESET_SEND:
1376*d14abf15SRobert Mustacchi DbgBreakIf(! tcp->sp_request);
1377*d14abf15SRobert Mustacchi DbgBreakIf(tcp->sp_request->type != SP_REQUEST_ABORTIVE_DISCONNECT);
1378*d14abf15SRobert Mustacchi tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_RAMROD_CMP;
1379*d14abf15SRobert Mustacchi tcp->rx_con->dpc_info.dpc_comp_blocked = TRUE; /* TCP_RST_REQ_COMPLETED */
1380*d14abf15SRobert Mustacchi return;
1381*d14abf15SRobert Mustacchi case RAMROD_OPCODE_TOE_INVALIDATE:
1382*d14abf15SRobert Mustacchi DbgBreakIf(! tcp->sp_request);
1383*d14abf15SRobert Mustacchi DbgBreakIf(tcp->sp_request->type != SP_REQUEST_INVALIDATE);
1384*d14abf15SRobert Mustacchi tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_RAMROD_CMP;
1385*d14abf15SRobert Mustacchi tcp->rx_con->dpc_info.dpc_comp_blocked = TRUE; /* TCP_INV_REQ_COMPLETED */
1386*d14abf15SRobert Mustacchi return;
1387*d14abf15SRobert Mustacchi case RAMROD_OPCODE_TOE_TERMINATE:
1388*d14abf15SRobert Mustacchi DbgBreakIf(! tcp->sp_request);
1389*d14abf15SRobert Mustacchi DbgBreakIf(tcp->sp_request->type != SP_REQUEST_TERMINATE1_OFFLOAD);
1390*d14abf15SRobert Mustacchi tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_RAMROD_CMP;
1391*d14abf15SRobert Mustacchi tcp->rx_con->dpc_info.dpc_comp_blocked = TRUE; /* TCP_TRM_REQ_COMPLETED */
1392*d14abf15SRobert Mustacchi return;
1393*d14abf15SRobert Mustacchi case RAMROD_OPCODE_TOE_EMPTY_RAMROD:
1394*d14abf15SRobert Mustacchi DbgBreakIf(nbytes);
1395*d14abf15SRobert Mustacchi DbgBreakIf(! tcp->sp_request );
1396*d14abf15SRobert Mustacchi DbgBreakIf((tcp->sp_request->type != SP_REQUEST_PENDING_LOCAL_FIN_DISCONNECT) &&
1397*d14abf15SRobert Mustacchi (tcp->sp_request->type != SP_REQUEST_PENDING_REMOTE_DISCONNECT) &&
1398*d14abf15SRobert Mustacchi (tcp->sp_request->type != SP_REQUEST_PENDING_TX_RST));
1399*d14abf15SRobert Mustacchi tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_RAMROD_CMP;
1400*d14abf15SRobert Mustacchi return;
1401*d14abf15SRobert Mustacchi case RAMROD_OPCODE_TOE_INITIATE_OFFLOAD:
1402*d14abf15SRobert Mustacchi DbgBreakIf(nbytes);
1403*d14abf15SRobert Mustacchi DbgBreakIf(! tcp->sp_request );
1404*d14abf15SRobert Mustacchi DbgBreakIf(tcp->sp_request->type != SP_REQUEST_INITIATE_OFFLOAD);
1405*d14abf15SRobert Mustacchi
1406*d14abf15SRobert Mustacchi /* 13/08/08 NirV: bugbug, temp workaround for dpc watch dog bug,
1407*d14abf15SRobert Mustacchi * complete ofld request here - assumption: tcp lock is NOT taken by caller */
1408*d14abf15SRobert Mustacchi lm_tcp_comp_initiate_offload_request(pdev, tcp, LM_STATUS_SUCCESS);
1409*d14abf15SRobert Mustacchi lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_INITIATE_OFFLOAD, tcp->ulp_type, tcp->cid);
1410*d14abf15SRobert Mustacchi
1411*d14abf15SRobert Mustacchi return;
1412*d14abf15SRobert Mustacchi case CMP_OPCODE_TOE_LCN_ERR:
1413*d14abf15SRobert Mustacchi DbgBreakIf(! tcp->sp_request );
1414*d14abf15SRobert Mustacchi DbgBreakIf(tcp->sp_request->type != SP_REQUEST_INITIATE_OFFLOAD);
1415*d14abf15SRobert Mustacchi tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_RAMROD_CMP;
1416*d14abf15SRobert Mustacchi return;
1417*d14abf15SRobert Mustacchi default:
1418*d14abf15SRobert Mustacchi DbgMessage(pdev, FATAL, "unexpected rx cqe opcode=%d\n", cmd);
1419*d14abf15SRobert Mustacchi DbgBreakIfAll(TRUE);
1420*d14abf15SRobert Mustacchi }
1421*d14abf15SRobert Mustacchi }
1422*d14abf15SRobert Mustacchi
/** Description:
 *  Consume and process all pending CQEs on the RCQ of the given RSS index.
 *  Each CQE is either processed immediately (lm_tcp_rx_process_cqe) or stored
 *  for deferred processing when the connection has TCP_RX_COMP_DEFERRED set.
 *  Every connection that saw a completion is pushed onto 'connections' so the
 *  caller can run fast-path / slow-path completion over it afterwards.
 *  Also handles the non-connection completions: TOE init ramrod and RSS-update
 *  ramrod, and reproduces the consumed RCQ BDs to the FW.
 * Returns:
 *  TRUE if an RSS-update ramrod arrived mid-DPC (num_to_reproduce > 1) and its
 *  processing must be continued later by the caller, FALSE otherwise.
 */
u8_t lm_tcp_rx_process_cqes(lm_device_t *pdev, u8_t drv_toe_rss_id, s_list_t * connections)
{
    lm_tcp_rcq_t *rcq;
    lm_tcp_grq_t *grq;
    struct toe_rx_cqe *cqe, *hist_cqe;
    lm_tcp_state_t *tcp = NULL;
    u32_t cid;
    u32_t avg_dpc_cnt;
    u16_t cq_new_idx;
    u16_t cq_old_idx;
    u16_t num_to_reproduce = 0;
    u8_t defer_cqe;
    u8_t process_rss_upd_later = FALSE;
    MM_INIT_TCP_LOCK_HANDLE();

    DbgMessage(pdev, VERBOSEl4int , "###lm_tcp_rx_process_cqes START\n");

    rcq = &pdev->toe_info.rcqs[drv_toe_rss_id];
    grq = &pdev->toe_info.grqs[drv_toe_rss_id];
    cq_new_idx = *(rcq->hw_con_idx_ptr);
    cq_old_idx = lm_bd_chain_cons_idx(&rcq->bd_chain);
    DbgBreakIf(S16_SUB(cq_new_idx, cq_old_idx) < 0);

    /* save statistics */
    rcq->num_cqes_last_dpc = S16_SUB(cq_new_idx, cq_old_idx);
    DbgMessage(pdev, VERBOSEl4int, "###lm_tcp_rx_process_cqes num_cqes=%d\n", rcq->num_cqes_last_dpc);

    if (rcq->num_cqes_last_dpc) { /* Exclude zeroed value from statistics*/
        if(rcq->max_cqes_per_dpc < rcq->num_cqes_last_dpc) {
            rcq->max_cqes_per_dpc = rcq->num_cqes_last_dpc;
        }
        /* we don't want to wrap around...*/
        if ((rcq->sum_cqes_last_x_dpcs + rcq->num_cqes_last_dpc) < rcq->sum_cqes_last_x_dpcs) {
            rcq->avg_dpc_cnt = 0;
            rcq->sum_cqes_last_x_dpcs = 0;
        }
        rcq->sum_cqes_last_x_dpcs += rcq->num_cqes_last_dpc;
        rcq->avg_dpc_cnt++;
        avg_dpc_cnt = rcq->avg_dpc_cnt;
        if (avg_dpc_cnt) { /*Prevent division by 0*/
            rcq->avg_cqes_per_dpc = rcq->sum_cqes_last_x_dpcs / avg_dpc_cnt;
        } else {
            rcq->sum_cqes_last_x_dpcs = 0;
        }
    }


    /* if we are suspended, we need to check if we can resume processing */
    if (rcq->suspend_processing == TRUE) {
        lm_tcp_rss_update_suspend_rcq(pdev, rcq);
        if (rcq->suspend_processing == TRUE) {
            /* skip the consumption loop */
            cq_new_idx = cq_old_idx;
            DbgMessage(pdev, VERBOSEl4int, "lm_tcp_rx_process_cqes(): rcq suspended - idx:%d\n", drv_toe_rss_id);
        }
    }

    while(cq_old_idx != cq_new_idx) {
        u32_t update_stats_type;
        u8_t opcode;

        DbgBreakIf(S16_SUB(cq_new_idx, cq_old_idx) <= 0);

        /* get next consumed cqe */
        cqe = lm_toe_bd_chain_consume_bd(&rcq->bd_chain);
        /* BUGFIX: validate the cqe BEFORE dereferencing it (the check used to
         * follow the first use, making it ineffective) */
        DbgBreakIf(!cqe);
        update_stats_type = cqe->data.raw_data;
        num_to_reproduce++;

        /* get cid and opcode from cqe */
        cid = SW_CID(((cqe->params1 & TOE_RX_CQE_CID) >> TOE_RX_CQE_CID_SHIFT));
        opcode = (cqe->params1 & TOE_RX_CQE_COMPLETION_OPCODE) >> TOE_RX_CQE_COMPLETION_OPCODE_SHIFT;

        if (opcode == RAMROD_OPCODE_TOE_RSS_UPDATE) {

            /* update the saved consumer */
            cq_old_idx = lm_bd_chain_cons_idx(&rcq->bd_chain);

            /* rss update ramrod */
            DbgMessage(pdev, INFORMl4int, "lm_tcp_rx_process_cqes(): calling lm_tcp_rss_update_ramrod_comp - drv_toe_rss_id:%d\n", drv_toe_rss_id);
            if (num_to_reproduce > 1) {
                /* other cqes preceded the rss-update: finish them first and
                 * ask the caller to continue rss-update processing later */
                process_rss_upd_later = TRUE;
                lm_tcp_rss_update_ramrod_comp(pdev, rcq, cid, update_stats_type, FALSE);
                break;
            }
            lm_tcp_rss_update_ramrod_comp(pdev, rcq, cid, update_stats_type, TRUE);

            /* suspend further RCQ processing (if needed) */
            if (rcq->suspend_processing == TRUE)
                break;
            else
                continue;

        }

        if (cid < MAX_ETH_REG_CONS) {
            /* toe init ramrod - the only valid completion on a non-L4 cid */
            DbgBreakIf(((cqe->params1 & TOE_RX_CQE_COMPLETION_OPCODE) >> TOE_RX_CQE_COMPLETION_OPCODE_SHIFT)
                       != RAMROD_OPCODE_TOE_INIT);
            lm_tcp_init_ramrod_comp(pdev);
            cq_old_idx = lm_bd_chain_cons_idx(&rcq->bd_chain);
            DbgBreakIf(cq_old_idx != cq_new_idx);
            /* We need to update the slow-path ring. This is usually done in the lm_tcp_rx_complete_sp_cqes,
             * but we won't get there since this completion is not associated with a connection. USUALLY we
             * have to update the sp-ring only AFTER we've written the CQ producer, this is to promise that there
             * will always be an empty entry for another ramrod completion, but in this case we're safe, since only
             * one CQE is occupied anyway */
            lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_INIT, TOE_CONNECTION_TYPE, LM_SW_LEADING_RSS_CID(pdev));
            break;
        }

        tcp = lm_cid_cookie(pdev, TOE_CONNECTION_TYPE, cid);
        DbgBreakIf(!tcp);
        /* save cqe in history_cqes */
        hist_cqe = (struct toe_rx_cqe *)lm_tcp_qe_buffer_next_cqe_override(&tcp->rx_con->history_cqes);
        *hist_cqe = *cqe;

        /* ASSUMPTION: if COMP_DEFERRED changes from FALSE to TRUE, the change occurs only in DPC
         * o/w it can only change from TRUE to FALSE.
         *
         * Read flag w/o lock. Flag may change by the time we call rx_defer_cqe
         * Need to check again under lock. We want to avoid acquiring the lock every DPC */
        defer_cqe = ((tcp->rx_con->flags & TCP_RX_COMP_DEFERRED) == TCP_RX_COMP_DEFERRED);
        if (defer_cqe) {
            /* if we're deferring completions - just store the cqe and continue to the next one
             * Assumptions: ALL commands need to be deferred, we aren't expecting any command on
             * L4 that we should pay attention to for this connection ( only one outstanding sp at a time ) */
            /* Return if we are still deferred (may have changed since initial check was w/o a lock */
            mm_acquire_tcp_lock(pdev, tcp->rx_con);
            /* check again under lock if we're deferred */
            defer_cqe = ((tcp->rx_con->flags & TCP_RX_COMP_DEFERRED) == TCP_RX_COMP_DEFERRED);
            if (defer_cqe) {
                tcp->rx_con->flags |= TCP_DEFERRED_PROCESSING;

                /* 13/08/08 NirV: bugbug, temp workaround for dpc watch dog bug,
                 * release the tcp lock if cqe is offload complete */
                if (((cqe->params1 & TOE_RX_CQE_COMPLETION_OPCODE) >> TOE_RX_CQE_COMPLETION_OPCODE_SHIFT) == RAMROD_OPCODE_TOE_INITIATE_OFFLOAD)
                {
                    mm_release_tcp_lock(pdev, tcp->rx_con);
                }

                lm_tcp_rx_process_cqe(pdev,cqe,tcp,drv_toe_rss_id);
            }

            /* 13/08/08 NirV: bugbug, temp workaround for dpc watch dog bug,
             * release the tcp lock if cqe is not offload complete (was released earlier) */
            if (((cqe->params1 & TOE_RX_CQE_COMPLETION_OPCODE) >> TOE_RX_CQE_COMPLETION_OPCODE_SHIFT) != RAMROD_OPCODE_TOE_INITIATE_OFFLOAD)
            {
                mm_release_tcp_lock(pdev, tcp->rx_con);
            }
        }

        if (!defer_cqe) {
            /* connections will always be initialized to a dummy, so once a tcp connection is added to the
             * list, it's link will be initialized to point to another link other than NULL */
            if (s_list_next_entry(&tcp->rx_con->dpc_info.link) == NULL) {
                s_list_push_head(connections, &tcp->rx_con->dpc_info.link);
            }
            lm_tcp_rx_process_cqe(pdev, cqe, tcp, drv_toe_rss_id);
        }

        cq_old_idx = lm_bd_chain_cons_idx(&rcq->bd_chain);
    }

    /* We may have nothing to reproduce if we were called from a sw_dpc */
    if (num_to_reproduce) {
        lm_toe_bd_chain_bds_produced(&rcq->bd_chain, num_to_reproduce);

        /* GilR 5/13/2006 - TBA - save some stats? */

        /* notify the fw of the prod of the RCQ */
        LM_INTMEM_WRITE16(pdev, USTORM_TOE_CQ_PROD_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id) , PORT_ID(pdev)),
                          lm_bd_chain_prod_idx(&rcq->bd_chain), BAR_USTRORM_INTMEM);

        if (pdev->params.enable_dynamic_hc[HC_INDEX_TOE_RX_CQ_CONS]) {
            u32_t l4_quasi_byte_counter;
            u16_t prod_idx_diff = lm_bd_chain_prod_idx(&rcq->bd_chain) - rcq->bd_chain.bds_per_page * rcq->bd_chain.page_cnt;
            l4_quasi_byte_counter = prod_idx_diff;
            l4_quasi_byte_counter <<= 16;
            //fIXME
            LM_INTMEM_WRITE32(pdev, rcq->hc_sb_info.iro_dhc_offset, l4_quasi_byte_counter, BAR_CSTRORM_INTMEM);
        }
    }
    DbgMessage(pdev, VERBOSEl4int , "###lm_tcp_rx_process_cqes END\n");
    return process_rss_upd_later;
}
1609*d14abf15SRobert Mustacchi
1610*d14abf15SRobert Mustacchi /** Description
1611*d14abf15SRobert Mustacchi * compensate the grq
1612*d14abf15SRobert Mustacchi * Assumption:
1613*d14abf15SRobert Mustacchi * called under the GRQ LOCK
1614*d14abf15SRobert Mustacchi */
/** Description
 *  Refill the GRQ of the given RSS index with generic buffers. When any
 *  buffers were actually added, publish the new GRQ producer to the FW.
 *  Arms/disarms compensation-on-allocation based on GRQ occupancy, and
 *  finally returns any buffers collected during the fill to their pool.
 * Assumption:
 *  called under the GRQ LOCK
 */
void lm_tcp_rx_compensate_grq(lm_device_t * pdev, u8_t drv_toe_rss_id)
{
    lm_tcp_grq_t * grq            = &pdev->toe_info.grqs[drv_toe_rss_id];
    d_list_t     * collected_bufs = &grq->aux_gen_list;

    MM_ACQUIRE_TOE_GRQ_LOCK_DPC(pdev, drv_toe_rss_id);

    if (lm_tcp_rx_fill_grq(pdev, drv_toe_rss_id, collected_bufs, FILL_GRQ_FULL)) {
        DbgMessage(pdev, INFORMl4rx, "lm_toe_service_rx_intr: Updating GRQ producer\n");
        /* notify the fw of the prod of the GRQ */
        LM_INTMEM_WRITE16(pdev, USTORM_TOE_GRQ_PROD_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id) , PORT_ID(pdev)),
                          lm_bd_chain_prod_idx(&grq->bd_chain), BAR_USTRORM_INTMEM);
    }

    /* arm compensation-on-alloc iff occupancy dropped below the XON threshold */
    if ((grq->bd_chain.capacity - grq->bd_chain.bd_left) < GRQ_XON_TH) {
        grq->grq_compensate_on_alloc = TRUE;
    } else {
        grq->grq_compensate_on_alloc = FALSE;
    }

    MM_RELEASE_TOE_GRQ_LOCK_DPC(pdev, drv_toe_rss_id);

    /* buffers collected during the fill go back to the pool outside the lock */
    if (!d_list_is_empty(collected_bufs)) {
        mm_tcp_return_list_of_gen_bufs(pdev, collected_bufs, 0, NON_EXISTENT_SB_IDX);
        d_list_clear(collected_bufs);
    }
}
1640*d14abf15SRobert Mustacchi
/* Disarm compensation-on-allocation for this RSS index' GRQ.
 * The flag is only ever set from within a DPC; while it is armed the GRQ may
 * be accessed from the allocation context, so clear it here. The clear is
 * done under the GRQ lock in case the alloc context is compensating at this
 * very moment. */
static __inline void lm_tcp_rx_lock_grq(lm_device_t *pdev, u8_t drv_toe_rss_id)
{
    lm_tcp_grq_t *grq = &pdev->toe_info.grqs[drv_toe_rss_id];

    if (grq->grq_compensate_on_alloc) {
        MM_ACQUIRE_TOE_GRQ_LOCK_DPC(pdev, drv_toe_rss_id);
        grq->grq_compensate_on_alloc = FALSE;
        MM_RELEASE_TOE_GRQ_LOCK_DPC(pdev, drv_toe_rss_id);
    }
}
1653*d14abf15SRobert Mustacchi
/** Description:
 *  Rx interrupt service routine for one TOE RSS index. In a loop: processes
 *  the RCQ (collecting touched connections), compensates the GRQ, then runs
 *  fast-path and slow-path completion over each collected connection. The
 *  loop repeats when an RSS-update ramrod was deferred mid-DPC and the RCQ
 *  is no longer suspended. Finally folds any per-GRQ isle counter deltas
 *  into the device-global isle counters under the isles lock.
 *
 *  The 'connections' list always carries a 'dummy' sentinel at its tail so
 *  that every real entry has a non-NULL next link; both traversal loops use
 *  that property to know when to stop, and must ignore the sentinel itself.
 */
void lm_toe_service_rx_intr(lm_device_t *pdev, u8_t drv_toe_rss_id)
{
    s_list_t connections;
    s_list_entry_t dummy;       /* sentinel tail entry, never a real connection */
    lm_tcp_con_t * con;
    lm_tcp_state_t * tcp;
    u32_t dbg_loop_cnt = 0;
    u8_t process_rss_upd;

    MM_INIT_TCP_LOCK_HANDLE();

    DbgMessage(pdev, VERBOSEl4int , "###lm_toe_service_rx_intr START\n");
    DbgBreakIf(!(pdev && ARRSIZE(pdev->toe_info.rcqs) > drv_toe_rss_id));

    /* lock the grq from external access: i.e.. allocation compensation */
    lm_tcp_rx_lock_grq(pdev, drv_toe_rss_id);

    while (TRUE) {
        dbg_loop_cnt++;
        s_list_clear(&connections);
        s_list_push_head(&connections, &dummy);
        /* process the cqes and initialize connections with all the connections that appeared
         * in the DPC */
        process_rss_upd = lm_tcp_rx_process_cqes(pdev,drv_toe_rss_id,&connections);

        /* Compensate the GRQ with generic buffers from the pool : process_cqes takes buffers from the grq */
        lm_tcp_rx_compensate_grq(pdev,drv_toe_rss_id);

        /* FP: traverse the connections. remember to ignore the last one (the dummy).
         * (dead stores of con->tcp_state into 'tcp' removed here - the FP path
         * passes con->tcp_state directly) */
        con = (lm_tcp_con_t *)s_list_peek_head(&connections);
        while (s_list_next_entry(&con->dpc_info.link) != NULL) {
            mm_acquire_tcp_lock(pdev, con);
            lm_tcp_rx_complete_tcp_fp(pdev, con->tcp_state, con);
            mm_release_tcp_lock(pdev, con);
            con = (lm_tcp_con_t *)s_list_next_entry(&con->dpc_info.link);
        }

        /* SP : traverse the connections. remember to ignore the last one (the dummy) */
        con = (lm_tcp_con_t *)s_list_pop_head(&connections);
        s_list_next_entry(&con->dpc_info.link) = NULL;
        tcp = con->tcp_state;
        while (s_list_entry_cnt(&connections) > 0) {
            /* we access snapshot and not dpc, since once the dpc_flags were copied
             * to snapshot they were zeroized */
            if (con->dpc_info.snapshot_flags) {
                lm_tcp_rx_complete_tcp_sp(pdev, tcp, con);
            }
            con = (lm_tcp_con_t *)s_list_pop_head(&connections);
            s_list_next_entry(&con->dpc_info.link) = NULL;
            tcp = con->tcp_state;
        }

        if (process_rss_upd) {
            lm_tcp_rss_update_suspend_rcq(pdev,&pdev->toe_info.rcqs[drv_toe_rss_id]);
            if (!pdev->toe_info.rcqs[drv_toe_rss_id].suspend_processing) {
                pdev->toe_info.rcqs[drv_toe_rss_id].rss_update_processing_continued++;
                continue;
            }
        }
        break;
    }
    /* track the deepest continuation depth seen, for debug statistics */
    if (pdev->toe_info.rcqs[drv_toe_rss_id].rss_update_processing_max_continued < dbg_loop_cnt) {
        pdev->toe_info.rcqs[drv_toe_rss_id].rss_update_processing_max_continued = dbg_loop_cnt;
    }

    /* fold per-GRQ isle deltas into the device-global counters, then reset them */
    if (pdev->toe_info.grqs[drv_toe_rss_id].number_of_isles_delta || pdev->toe_info.grqs[drv_toe_rss_id].gen_bufs_in_isles_delta) {
        MM_ACQUIRE_ISLES_CONTROL_LOCK_DPC(pdev);
        lm_tcp_update_isles_cnts(pdev, pdev->toe_info.grqs[drv_toe_rss_id].number_of_isles_delta,
                                 pdev->toe_info.grqs[drv_toe_rss_id].gen_bufs_in_isles_delta);
        MM_RELEASE_ISLES_CONTROL_LOCK_DPC(pdev);
        pdev->toe_info.grqs[drv_toe_rss_id].number_of_isles_delta = pdev->toe_info.grqs[drv_toe_rss_id].gen_bufs_in_isles_delta = 0;
    }

    DbgMessage(pdev, VERBOSEl4int , "###lm_toe_service_rx_intr END\n");
}
1731*d14abf15SRobert Mustacchi
1732*d14abf15SRobert Mustacchi /** Description:
1733*d14abf15SRobert Mustacchi * Post a single tcp buffer to the Rx bd chain
1734*d14abf15SRobert Mustacchi * Assumptions:
1735*d14abf15SRobert Mustacchi * - caller initiated tcp_buf->flags field with BUFFER_START/BUFFER_END/PUSH appropriately
1736*d14abf15SRobert Mustacchi * Returns:
1737*d14abf15SRobert Mustacchi * - SUCCESS - tcp buf was successfully attached to the bd chain
1738*d14abf15SRobert Mustacchi * - RESOURCE - not enough available BDs on bd chain for given tcp buf
1739*d14abf15SRobert Mustacchi * - CONNECTION_CLOSED - whenever connection's flag are marked as 'POST BLOCKED' */
lm_tcp_rx_post_buf(struct _lm_device_t * pdev,lm_tcp_state_t * tcp,lm_tcp_buffer_t * tcp_buf,lm_frag_list_t * frag_list)1740*d14abf15SRobert Mustacchi lm_status_t lm_tcp_rx_post_buf(
1741*d14abf15SRobert Mustacchi struct _lm_device_t *pdev,
1742*d14abf15SRobert Mustacchi lm_tcp_state_t *tcp,
1743*d14abf15SRobert Mustacchi lm_tcp_buffer_t *tcp_buf,
1744*d14abf15SRobert Mustacchi lm_frag_list_t *frag_list
1745*d14abf15SRobert Mustacchi )
1746*d14abf15SRobert Mustacchi {
1747*d14abf15SRobert Mustacchi lm_tcp_con_t * rx_con;
1748*d14abf15SRobert Mustacchi lm_tcp_con_rx_gen_info_t * gen_info;
1749*d14abf15SRobert Mustacchi lm_status_t lm_stat = LM_STATUS_SUCCESS;
1750*d14abf15SRobert Mustacchi d_list_t return_list; /* buffers to return to pool in case of copying to buffer */
1751*d14abf15SRobert Mustacchi u32_t copied_bytes = 0;
1752*d14abf15SRobert Mustacchi u32_t add_sws_bytes = 0;
1753*d14abf15SRobert Mustacchi u8_t split_buffer = FALSE;
1754*d14abf15SRobert Mustacchi
1755*d14abf15SRobert Mustacchi DbgMessage(pdev, VERBOSEl4rx, "###lm_tcp_rx_post_buf cid=%d\n", tcp->cid);
1756*d14abf15SRobert Mustacchi DbgBreakIf(!(pdev && tcp));
1757*d14abf15SRobert Mustacchi DbgBreakIf(tcp->cid && (tcp != lm_cid_cookie(pdev, TOE_CONNECTION_TYPE, tcp->cid)));
1758*d14abf15SRobert Mustacchi /* (tcp_buf==NULL <=> frag_list==NULL) && (frag_list!= NULL => frag_list->cnt != 0) */
1759*d14abf15SRobert Mustacchi DbgBreakIf( ( ! ( ( (!tcp_buf) && (!frag_list) ) || (tcp_buf && frag_list) ) ) ||
1760*d14abf15SRobert Mustacchi ( frag_list && (frag_list->cnt == 0) ) );
1761*d14abf15SRobert Mustacchi
1762*d14abf15SRobert Mustacchi rx_con = tcp->rx_con;
1763*d14abf15SRobert Mustacchi if ( GET_FLAGS(rx_con->flags, TCP_RX_POST_BLOCKED) ) {
1764*d14abf15SRobert Mustacchi // DbgBreakIf(!tcp_buf); /* (lm_tcp_rx_post_buf design guides VBD doc) */
1765*d14abf15SRobert Mustacchi if (!tcp_buf) {
1766*d14abf15SRobert Mustacchi tcp->rx_con->zb_rx_post_blocked++;
1767*d14abf15SRobert Mustacchi return LM_STATUS_SUCCESS;
1768*d14abf15SRobert Mustacchi } else {
1769*d14abf15SRobert Mustacchi tcp->rx_con->rx_post_blocked++;
1770*d14abf15SRobert Mustacchi return LM_STATUS_CONNECTION_CLOSED;
1771*d14abf15SRobert Mustacchi }
1772*d14abf15SRobert Mustacchi }
1773*d14abf15SRobert Mustacchi
1774*d14abf15SRobert Mustacchi /* TCP_POST_DELAYED is turned on when the lm can not process new buffers for some reason, but not permanently
1775*d14abf15SRobert Mustacchi * Assumption: UM will eventually try to repost this buffer... */
1776*d14abf15SRobert Mustacchi if ( GET_FLAGS(rx_con->flags, TCP_POST_DELAYED)) {
1777*d14abf15SRobert Mustacchi return LM_STATUS_FAILURE;
1778*d14abf15SRobert Mustacchi }
1779*d14abf15SRobert Mustacchi
1780*d14abf15SRobert Mustacchi RESET_FLAGS(rx_con->flags, TCP_INDICATE_REJECTED);
1781*d14abf15SRobert Mustacchi
1782*d14abf15SRobert Mustacchi /* set tcp_buf fields */
1783*d14abf15SRobert Mustacchi if (tcp_buf) {
1784*d14abf15SRobert Mustacchi /* check bd chain availability */
1785*d14abf15SRobert Mustacchi if(lm_bd_chain_avail_bds(&rx_con->bd_chain) < frag_list->cnt) {
1786*d14abf15SRobert Mustacchi DbgBreakIf(s_list_is_empty(&rx_con->active_tb_list));
1787*d14abf15SRobert Mustacchi /* Check if the last placed BD was part of a split buffer (no end flag) if so, mark is at special split-end
1788*d14abf15SRobert Mustacchi * and give a doorbell as if it was with END. Also, block UM from giving us more buffers until we've completed
1789*d14abf15SRobert Mustacchi * this one (See L4 VBD Spec for more details on "Large Application Buffers" */
1790*d14abf15SRobert Mustacchi if (!(GET_FLAGS(rx_con->u.rx.last_rx_bd->flags , TOE_RX_BD_END))) {
1791*d14abf15SRobert Mustacchi SET_FLAGS(rx_con->u.rx.last_rx_bd->flags, (TOE_RX_BD_END | TOE_RX_BD_SPLIT));
1792*d14abf15SRobert Mustacchi /* Mark the last buffer in active-tb-list as 'special' so that we know when we complete it that we can
1793*d14abf15SRobert Mustacchi * unblock UM... */
1794*d14abf15SRobert Mustacchi tcp_buf = (lm_tcp_buffer_t *)s_list_peek_tail(&rx_con->active_tb_list);
1795*d14abf15SRobert Mustacchi SET_FLAGS(tcp_buf->flags, TCP_BUF_FLAG_L4_SPLIT);
1796*d14abf15SRobert Mustacchi SET_FLAGS(rx_con->flags, TCP_POST_DELAYED);
1797*d14abf15SRobert Mustacchi lm_tcp_rx_write_db(pdev, tcp);
1798*d14abf15SRobert Mustacchi }
1799*d14abf15SRobert Mustacchi DbgMessage(pdev, INFORMl4rx, "post rx buf failed, rx chain is full (cid=%d, avail bds=%d, buf nfrags=%d)\n",
1800*d14abf15SRobert Mustacchi tcp->cid, lm_bd_chain_avail_bds(&rx_con->bd_chain), frag_list->cnt);
1801*d14abf15SRobert Mustacchi return LM_STATUS_RESOURCE;
1802*d14abf15SRobert Mustacchi }
1803*d14abf15SRobert Mustacchi
1804*d14abf15SRobert Mustacchi tcp_buf->size = tcp_buf->more_to_comp = (u32_t)frag_list->size;
1805*d14abf15SRobert Mustacchi tcp_buf->bd_used = 0; /* will be modified if buffer will be posted */
1806*d14abf15SRobert Mustacchi DbgBreakIf(!(tcp_buf->flags & TCP_BUF_FLAG_L4_POST_START ?
1807*d14abf15SRobert Mustacchi rx_con->app_buf_bytes_acc_post == 0 :
1808*d14abf15SRobert Mustacchi rx_con->app_buf_bytes_acc_post > 0));
1809*d14abf15SRobert Mustacchi rx_con->app_buf_bytes_acc_post += tcp_buf->size;
1810*d14abf15SRobert Mustacchi
1811*d14abf15SRobert Mustacchi /* special care in case of last tcp buffer of an application buffer */
1812*d14abf15SRobert Mustacchi if(tcp_buf->flags & TCP_BUF_FLAG_L4_POST_END) {
1813*d14abf15SRobert Mustacchi tcp_buf->app_buf_xferred = 0; /* just for safety */
1814*d14abf15SRobert Mustacchi tcp_buf->app_buf_size = rx_con->app_buf_bytes_acc_post;
1815*d14abf15SRobert Mustacchi rx_con->app_buf_bytes_acc_post = 0;
1816*d14abf15SRobert Mustacchi }
1817*d14abf15SRobert Mustacchi split_buffer = !(GET_FLAGS(tcp_buf->flags, TCP_BUF_FLAG_L4_POST_START) && GET_FLAGS(tcp_buf->flags, TCP_BUF_FLAG_L4_POST_END));
1818*d14abf15SRobert Mustacchi } else {
1819*d14abf15SRobert Mustacchi /* zero-byte request */
1820*d14abf15SRobert Mustacchi rx_con->u.rx.rx_zero_byte_recv_reqs++;
1821*d14abf15SRobert Mustacchi }
1822*d14abf15SRobert Mustacchi
1823*d14abf15SRobert Mustacchi /* we could be in the middle of completing a split-buffer... this is in case the previous split buffer was posted partially and we got a
1824*d14abf15SRobert Mustacchi * cmp with push... need to complete it here. */
1825*d14abf15SRobert Mustacchi if (GET_FLAGS(rx_con->flags, TCP_POST_COMPLETE_SPLIT)) {
1826*d14abf15SRobert Mustacchi DbgBreakIf(!split_buffer); /* we can only be in this state if we're completing split buffers... */
1827*d14abf15SRobert Mustacchi rx_con->bytes_push_skip_cnt += tcp_buf->more_to_comp; /* how many bytes did we skip? */
1828*d14abf15SRobert Mustacchi tcp_buf->more_to_comp = 0;
1829*d14abf15SRobert Mustacchi rx_con->partially_completed_buf_cnt++;
1830*d14abf15SRobert Mustacchi /* complete buffer */
1831*d14abf15SRobert Mustacchi s_list_push_tail(&(tcp->rx_con->active_tb_list), &(tcp_buf->link));
1832*d14abf15SRobert Mustacchi rx_con->rq_nbytes += tcp_buf->size;
1833*d14abf15SRobert Mustacchi rx_con->buffer_skip_post_cnt++;
1834*d14abf15SRobert Mustacchi lm_tcp_complete_tcp_buf(pdev, tcp, rx_con,tcp_buf,0);
1835*d14abf15SRobert Mustacchi return LM_STATUS_SUCCESS;
1836*d14abf15SRobert Mustacchi }
1837*d14abf15SRobert Mustacchi
1838*d14abf15SRobert Mustacchi gen_info = &rx_con->u.rx.gen_info;
1839*d14abf15SRobert Mustacchi
1840*d14abf15SRobert Mustacchi if ( gen_info->peninsula_nbytes ) {
1841*d14abf15SRobert Mustacchi DbgMessage(pdev, VERBOSEl4rx, "###lm_tcp_rx_post_buf WITH GENERIC, cid=%d, tcp_buf=%p, buf_size=%d, buf_flags=%d, peninsula_nbytes=%d\n",
1842*d14abf15SRobert Mustacchi tcp->cid, tcp_buf, frag_list ? frag_list->size : 0, tcp_buf ? tcp_buf->flags : 0, rx_con->u.rx.gen_info.peninsula_nbytes);
1843*d14abf15SRobert Mustacchi if (tcp_buf) {
1844*d14abf15SRobert Mustacchi d_list_init(&return_list, NULL, NULL, 0);
1845*d14abf15SRobert Mustacchi copied_bytes = lm_tcp_rx_peninsula_to_rq_copy(pdev,tcp,tcp_buf,&return_list, 0xffffffff, FALSE);
1846*d14abf15SRobert Mustacchi gen_info->bytes_copied_cnt_in_post += copied_bytes;
1847*d14abf15SRobert Mustacchi if (!d_list_is_empty(&return_list)) {
1848*d14abf15SRobert Mustacchi lm_tcp_return_list_of_gen_bufs(pdev,tcp,&return_list, MM_TCP_RGB_COMPENSATE_GRQS, NON_EXISTENT_SB_IDX);
1849*d14abf15SRobert Mustacchi }
1850*d14abf15SRobert Mustacchi if ((copied_bytes == tcp_buf->size) && !split_buffer && s_list_is_empty(&rx_con->active_tb_list)) {
1851*d14abf15SRobert Mustacchi /* consumed_cnt: our way of telling fw we bypassed it */
1852*d14abf15SRobert Mustacchi lm_tcp_incr_consumed_gen(pdev, tcp, tcp_buf->size);
1853*d14abf15SRobert Mustacchi /* simulate a _lm_tcp_rx_post_buf for lm_tcp_complete_bufs */
1854*d14abf15SRobert Mustacchi s_list_push_tail(&(tcp->rx_con->active_tb_list), &(tcp_buf->link));
1855*d14abf15SRobert Mustacchi rx_con->rq_nbytes += tcp_buf->size;
1856*d14abf15SRobert Mustacchi rx_con->buffer_skip_post_cnt++;
1857*d14abf15SRobert Mustacchi rx_con->bytes_skip_post_cnt += copied_bytes;
1858*d14abf15SRobert Mustacchi /* If we copied some bytes to the RQ, we can now compensate FW-Window with these copied bytes. */
1859*d14abf15SRobert Mustacchi add_sws_bytes += copied_bytes;
1860*d14abf15SRobert Mustacchi /* this function completes nbytes on the tcp buf and may complete the buffer if more_to_comp = 0*/
1861*d14abf15SRobert Mustacchi lm_tcp_complete_tcp_buf(pdev, tcp, rx_con,tcp_buf,copied_bytes);
1862*d14abf15SRobert Mustacchi } else {
1863*d14abf15SRobert Mustacchi /* will be posted and therefore get a SKP at some stage. */
1864*d14abf15SRobert Mustacchi if (!GET_FLAGS(rx_con->flags, TCP_POST_NO_SKP)) {
1865*d14abf15SRobert Mustacchi rx_con->u.rx.skp_bytes_copied += copied_bytes;
1866*d14abf15SRobert Mustacchi }
1867*d14abf15SRobert Mustacchi lm_stat = _lm_tcp_rx_post_buf(pdev, tcp, tcp_buf, frag_list);
1868*d14abf15SRobert Mustacchi DbgBreakIf(lm_stat != LM_STATUS_SUCCESS);
1869*d14abf15SRobert Mustacchi if (copied_bytes && GET_FLAGS(rx_con->flags, TCP_POST_NO_SKP)) {
1870*d14abf15SRobert Mustacchi lm_tcp_rx_write_db(pdev, tcp); /* for the case of split buffer in which bytes/bds are accumulated in bd_more* fields. bd_more* fields must be cleaned at this phase */
1871*d14abf15SRobert Mustacchi rx_con->bytes_comp_cnt += copied_bytes;
1872*d14abf15SRobert Mustacchi /* If we copied some bytes to the RQ, we can now compensate FW-Window with these copied bytes. */
1873*d14abf15SRobert Mustacchi add_sws_bytes += copied_bytes;
1874*d14abf15SRobert Mustacchi /* this function completes nbytes on the tcp buf and may complete the buffer if more_to_comp = 0*/
1875*d14abf15SRobert Mustacchi lm_tcp_complete_tcp_buf(pdev, tcp, rx_con,tcp_buf,copied_bytes);
1876*d14abf15SRobert Mustacchi }
1877*d14abf15SRobert Mustacchi }
1878*d14abf15SRobert Mustacchi }
1879*d14abf15SRobert Mustacchi /* if we have something to indicate after copying and it's ok to indicate... - indicate it */
1880*d14abf15SRobert Mustacchi if (gen_info->peninsula_nbytes && _lm_tcp_ok_to_indicate(rx_con)) {
1881*d14abf15SRobert Mustacchi DbgBreakIf(frag_list && (frag_list->size != copied_bytes)); /* can't have bytes left with free space in tcp buf */
1882*d14abf15SRobert Mustacchi mm_tcp_rx_indicate_gen(pdev, tcp);
1883*d14abf15SRobert Mustacchi add_sws_bytes += gen_info->add_sws_bytes; /* any bytes we need to update will be aggregated here during indicate */
1884*d14abf15SRobert Mustacchi gen_info->add_sws_bytes = 0;
1885*d14abf15SRobert Mustacchi
1886*d14abf15SRobert Mustacchi }
1887*d14abf15SRobert Mustacchi } else if (tcp_buf) {
1888*d14abf15SRobert Mustacchi DbgMessage(pdev, VERBOSEl4rx, "###lm_tcp_rx_post_buf NO COPY, cid=%d, tcp_buf=%p, buf_size=%d, buf_flags=%d, peninsula_nbytes=%d\n",
1889*d14abf15SRobert Mustacchi tcp->cid, tcp_buf, frag_list->size, tcp_buf->flags, rx_con->u.rx.gen_info.peninsula_nbytes);
1890*d14abf15SRobert Mustacchi lm_stat = _lm_tcp_rx_post_buf(pdev, tcp, tcp_buf, frag_list);
1891*d14abf15SRobert Mustacchi DbgBreakIf(lm_stat != LM_STATUS_SUCCESS);
1892*d14abf15SRobert Mustacchi }
1893*d14abf15SRobert Mustacchi
1894*d14abf15SRobert Mustacchi if (add_sws_bytes) {
1895*d14abf15SRobert Mustacchi lm_tcp_rx_post_sws(pdev, tcp, rx_con, add_sws_bytes, TCP_RX_POST_SWS_INC);
1896*d14abf15SRobert Mustacchi }
1897*d14abf15SRobert Mustacchi
1898*d14abf15SRobert Mustacchi
1899*d14abf15SRobert Mustacchi return lm_stat;
1900*d14abf15SRobert Mustacchi }
1901*d14abf15SRobert Mustacchi
1902*d14abf15SRobert Mustacchi
/* Posts a receive buffer to the FW.
 *
 * Attaches each fragment of frag_list to the connection's rx BD chain,
 * accumulates doorbell data (db_more_bytes / db_more_bds / db_more_bufs),
 * and queues tcp_buf on active_tb_list. The doorbell itself is rung only
 * when tcp_buf carries TCP_BUF_FLAG_L4_POST_END, i.e. once the entire
 * application buffer has been posted.
 *
 * Assumptions:
 * - caller initiated appropriately the following fields:
 *      - tcp_buf->flags
 *      - tcp_buf->size, tcp_buf->more_to_comp
 *      - tcp_buf->app_buf_size, tcp_buf->app_buf_xferred
 * - caller verified that there is enough available BDs in the BD chain for
 *   the given buffer
 *
 * Returns: always LM_STATUS_SUCCESS. */
static lm_status_t _lm_tcp_rx_post_buf(
    struct _lm_device_t *pdev,
    lm_tcp_state_t      *tcp,
    lm_tcp_buffer_t     *tcp_buf,
    lm_frag_list_t      *frag_list
    )
{
    lm_tcp_con_t *rx_con = tcp->rx_con;
    lm_bd_chain_t * rx_chain;
    u16_t old_prod, new_prod;
    struct toe_rx_bd * rx_bd;
    lm_frag_t * frag = frag_list->frag_arr;
    u32_t dbg_buf_size = 0;      /* debug-only: recomputed buffer size, cross-checked below */
    u32_t bd_bytes_prod; /* Each bd is initialized with a cyclic counter of bytes prod until that bd. */
    u16_t flags = 0;
    u32_t i;

    /* Number of fragments of entire application buffer can't be bigger
     * than size of the BD chain (entire application buffer since we can't
     * post partial application buffer to the FW, db_more_bds however includes the "next" bd, so we need
     * to take that into consideration as well */
    DbgBreakIfAll( (rx_con->db_more_bds + frag_list->cnt) > (u32_t)(rx_con->bd_chain.capacity + rx_con->bd_chain.page_cnt));

    rx_chain = &rx_con->bd_chain;
    DbgBreakIf(lm_bd_chain_avail_bds(rx_chain) < frag_list->cnt);

    old_prod = lm_bd_chain_prod_idx(rx_chain);

    /* First BD should have the START flag */
    if(tcp_buf->flags & TCP_BUF_FLAG_L4_POST_START) {
        flags = TOE_RX_BD_START;
    }

    /* Set NO_PUSH flag if needed */
    if ( tcp_buf->flags & TCP_BUF_FLAG_L4_RX_NO_PUSH ) {
        flags |= TOE_RX_BD_NO_PUSH;
    }
    if (tcp_buf->flags & TCP_BUF_FLAG_L4_PARTIAL_FILLED) {
        /* The partial-filled hint is passed to FW (via db_data flags) only for
         * the very first such buffer, before any RQ completion was seen;
         * otherwise the flag is stripped from the buffer. */
        if (!rx_con->partially_filled_buf_sent && !rx_con->rq_completion_calls) {
            SET_FLAGS(rx_con->db_data.rx->flags, TOE_RX_DB_DATA_PARTIAL_FILLED_BUF);
        } else {
            RESET_FLAGS(tcp_buf->flags, TCP_BUF_FLAG_L4_PARTIAL_FILLED);
        }
        rx_con->partially_filled_buf_sent++;
    }
    /* Attach the first frag to the BD-chain */
    bd_bytes_prod = rx_con->db_data.rx->bytes_prod + rx_con->db_more_bytes;
    rx_bd = _lm_tcp_rx_set_bd(frag, flags, rx_chain, bd_bytes_prod);
    bd_bytes_prod += frag->size;
    dbg_buf_size += frag->size;
    flags &= ~TOE_RX_BD_START;  /* only the first BD carries START */
    frag++;

    /* "attach" the remaining frags to the bd chain */
    for(i = 1; i < frag_list->cnt; i++, frag++) {
        rx_bd = _lm_tcp_rx_set_bd(frag, flags, rx_chain, bd_bytes_prod);
        dbg_buf_size += frag->size;
        bd_bytes_prod += frag->size;
    }
    tcp->rx_con->u.rx.last_rx_bd = rx_bd;

    /* The last BD must have the END flag */
    if(tcp_buf->flags & TCP_BUF_FLAG_L4_POST_END) {
        rx_bd->flags |= TOE_RX_BD_END;
        DbgMessage(NULL, VERBOSEl4rx, "Setting Rx last BD flags=0x%x\n", rx_bd->flags);
    }

    DbgBreakIf(frag_list->cnt > TCP_MAX_SGL_SIZE);
    /* NOTE(review): the '&' below only equals plain assignment if
     * TCP_MAX_SGL_SIZE is a (2^n - 1) mask — confirm the constant; otherwise
     * bd_used could be silently truncated even though the DbgBreakIf above
     * passed. */
    tcp_buf->bd_used = frag_list->cnt & TCP_MAX_SGL_SIZE;
    DbgBreakIf(tcp_buf->size != dbg_buf_size);

    /* Prepare data for a DoorBell */
    rx_con->db_more_bytes += tcp_buf->size;
    new_prod = lm_bd_chain_prod_idx(rx_chain);
    /* prod index may have advanced by more than bd_used (chain page crossings) */
    DbgBreakIf(S16_SUB(new_prod, old_prod) < tcp_buf->bd_used);
    rx_con->db_more_bds += S16_SUB(new_prod, old_prod);
    rx_con->db_more_bufs++;

    /* Enqueue the buffer to the active_tb_list */
    s_list_push_tail(&(rx_con->active_tb_list), &(tcp_buf->link));
    rx_con->rq_nbytes += tcp_buf->size;

    /* Ring the doorbell only once the application buffer is fully posted. */
    if(tcp_buf->flags & TCP_BUF_FLAG_L4_POST_END) {
        lm_tcp_rx_write_db(pdev, tcp);
    }


    return LM_STATUS_SUCCESS;
}
1998*d14abf15SRobert Mustacchi
/* Builds an indication out of the buffered (peninsula) generic data.
 *
 * Pops up to (*frag_list)->cnt generic buffers from the peninsula list,
 * chains them into an indication headed by *gen_buf, and fills *frag_list
 * with one frag per popped buffer (the first frag honors first_buf_offset).
 * If *frag_list is NULL on entry, the rx con pre-allocated frag list is used.
 * gen_info->peninsula_nbytes is decreased by the indicated byte count.
 *
 * Returns LM_STATUS_SUCCESS on success; LM_STATUS_FAILURE if the indication
 * head is unexpectedly NULL — note *gen_buf is NOT written on that path. */
static lm_status_t _lm_tcp_rx_get_buffered_data(
    struct _lm_device_t * pdev,
    lm_tcp_state_t      * tcp,
    lm_frag_list_t     ** frag_list, /* if *frag_list is NULL, the rx con pre-allocated will be used */
    lm_tcp_gen_buf_t   ** gen_buf
    )
{
    lm_tcp_con_t             * rx_con = tcp->rx_con;
    lm_tcp_con_rx_gen_info_t * gen_info;
    lm_tcp_gen_buf_t         * head_of_indication;
    d_list_t                   indicate_list;
    d_list_entry_t           * entry;
    lm_tcp_gen_buf_t         * curr_gen_buf;
    u32_t                      gen_offset, i;
    u32_t                      num_bufs_to_indicate;
    u32_t                      ind_nbufs=0, ind_nbytes=0;
    u8_t                       dont_send_to_system_more_then_rwin;
    DbgMessage(pdev, VERBOSEl4rx, "###_lm_tcp_rx_get_buffered_data cid=%d\n", tcp->cid);

    gen_info = &rx_con->u.rx.gen_info;


    if ((u16_t)tcp->tcp_cached.rcv_indication_size != 0) {
        DbgBreakMsg("MichalS rcv_indication_size != 0 not implemented\n");
        /* MichalS TBA: RcvIndicationSize > 0 will change following block quite a lot */
    }

    num_bufs_to_indicate = d_list_entry_cnt(&gen_info->peninsula_list);

    /* The buffers in peninsula_list are ALWAYS released, unreleased buffers are in the dpc_peninsula_list.
     * NOTE(review): this deref assumes peninsula_list is non-empty — callers
     * appear to guarantee peninsula_nbytes != 0 before calling; confirm. */
    DbgBreakIf(((lm_tcp_gen_buf_t *)d_list_peek_tail(&gen_info->peninsula_list))->placed_bytes == 0);

    if (*frag_list == NULL) {
        *frag_list = gen_info->frag_list;
        (*frag_list)->cnt = gen_info->max_frag_count;
    }

    if (num_bufs_to_indicate > (*frag_list)->cnt) {
        DbgMessage(pdev, WARNl4rx, "_lm_tcp_rx_get_buffered_data: number of buffers to indicate[%d] is larger than frag_cnt[%d] cid=%d\n",
                    num_bufs_to_indicate, (*frag_list)->cnt, tcp->cid);
        /* NOTE(review): caps to max_frag_count rather than (*frag_list)->cnt —
         * fine when the caller-supplied list is at least that large; verify
         * for the terminate path where an alternate list is passed in. */
        num_bufs_to_indicate = gen_info->max_frag_count;
        gen_info->num_non_full_indications++;
    }
    d_list_init(&indicate_list, NULL, NULL, 0);
    dont_send_to_system_more_then_rwin = (u8_t)gen_info->dont_send_to_system_more_then_rwin;
    while (num_bufs_to_indicate--) {
        entry = d_list_pop_head(&gen_info->peninsula_list);
        DbgBreakIf(entry == NULL);
        if (dont_send_to_system_more_then_rwin) {
            /* Cap the indication at initial_rcv_wnd bytes — but if even the
             * first buffer exceeds the window, indicate it anyway (drop the
             * cap) rather than indicating nothing. */
            if ((ind_nbytes + ((lm_tcp_gen_buf_t *)entry)->placed_bytes)
                > tcp->tcp_cached.initial_rcv_wnd) {
                if (ind_nbytes) {
                    /* window reached: put the buffer back for a later indication */
                    d_list_push_head(&gen_info->peninsula_list, entry);
                    break;
                } else {
                    dont_send_to_system_more_then_rwin = FALSE;
                }
            }
        }
        d_list_push_tail(&indicate_list, entry);
        ind_nbufs ++;
        ind_nbytes += ((lm_tcp_gen_buf_t *)entry)->placed_bytes;
    }

    /* the first buffer may start at an offset; those bytes are not indicated */
    ind_nbytes -= gen_info->first_buf_offset;

    head_of_indication = (lm_tcp_gen_buf_t *)d_list_peek_head(&indicate_list);

    if CHK_NULL(head_of_indication)
    {
        DbgBreakIfAll( !head_of_indication ) ;
        return LM_STATUS_FAILURE ;
    }

    /* The head generic buffer carries the indication metadata for the chain. */
    head_of_indication->tcp = tcp;
    head_of_indication->ind_nbufs = ind_nbufs;
    head_of_indication->ind_bytes = ind_nbytes;
    DbgBreakIf(gen_info->peninsula_nbytes < ind_nbytes);
    gen_info->peninsula_nbytes -= ind_nbytes;

    /* initialize frag list */
    (*frag_list)->cnt = ind_nbufs;
    (*frag_list)->size = ind_nbytes;
    curr_gen_buf = head_of_indication;

    gen_offset = gen_info->first_buf_offset;
    for (i = 0; i < (*frag_list)->cnt; i++ ) {
        (*frag_list)->frag_arr[i].addr.as_ptr = curr_gen_buf->buf_virt + gen_offset;
        (*frag_list)->frag_arr[i].size = curr_gen_buf->placed_bytes - gen_offset;
        curr_gen_buf = NEXT_GEN_BUF(curr_gen_buf);
        gen_offset = 0; /* only first buffer can have an offset */
        /* we don't touch gen_info->first_buf_offset - this is handled in lm_tcp_rx_buffered_data_indicated */
    }
    *gen_buf = head_of_indication;
    DbgMessage(pdev, VERBOSEl4rx, "###_lm_tcp_rx_get_buffered_data ind_bytes = %d\n", (*frag_list)->size);

    mm_atomic_inc(&pdev->toe_info.stats.total_indicated);
    return LM_STATUS_SUCCESS;
}
2098*d14abf15SRobert Mustacchi
lm_tcp_rx_get_buffered_data_from_terminate(struct _lm_device_t * pdev,lm_tcp_state_t * tcp,lm_frag_list_t ** frag_list,lm_tcp_gen_buf_t ** gen_buf)2099*d14abf15SRobert Mustacchi lm_status_t lm_tcp_rx_get_buffered_data_from_terminate (
2100*d14abf15SRobert Mustacchi struct _lm_device_t * pdev,
2101*d14abf15SRobert Mustacchi lm_tcp_state_t * tcp,
2102*d14abf15SRobert Mustacchi lm_frag_list_t ** frag_list,
2103*d14abf15SRobert Mustacchi lm_tcp_gen_buf_t ** gen_buf
2104*d14abf15SRobert Mustacchi )
2105*d14abf15SRobert Mustacchi {
2106*d14abf15SRobert Mustacchi lm_tcp_con_t * rx_con = tcp->rx_con;
2107*d14abf15SRobert Mustacchi lm_tcp_con_rx_gen_info_t * gen_info;
2108*d14abf15SRobert Mustacchi u16_t buff_cnt;
2109*d14abf15SRobert Mustacchi lm_tcp_gen_buf_t * unwanted_gen_buf = NULL;
2110*d14abf15SRobert Mustacchi lm_tcp_gen_buf_t * temp_gen_buf = NULL;
2111*d14abf15SRobert Mustacchi lm_status_t lm_status = LM_STATUS_SUCCESS;
2112*d14abf15SRobert Mustacchi
2113*d14abf15SRobert Mustacchi DbgMessage(pdev, VERBOSEl4rx, "###lm_tcp_rx_get_buffered_data_from_terminate cid=%d\n", tcp->cid);
2114*d14abf15SRobert Mustacchi
2115*d14abf15SRobert Mustacchi gen_info = &rx_con->u.rx.gen_info;
2116*d14abf15SRobert Mustacchi
2117*d14abf15SRobert Mustacchi /* make sure ALL the peninsula is released */
2118*d14abf15SRobert Mustacchi DbgBreakIf(!d_list_is_empty(&gen_info->peninsula_list) &&
2119*d14abf15SRobert Mustacchi (((lm_tcp_gen_buf_t *)d_list_peek_tail(&gen_info->peninsula_list))->placed_bytes == 0));
2120*d14abf15SRobert Mustacchi
2121*d14abf15SRobert Mustacchi *frag_list = NULL;
2122*d14abf15SRobert Mustacchi if (gen_info->peninsula_nbytes == 0) {
2123*d14abf15SRobert Mustacchi return LM_STATUS_SUCCESS;
2124*d14abf15SRobert Mustacchi }
2125*d14abf15SRobert Mustacchi
2126*d14abf15SRobert Mustacchi /* DbgBreakIf(gen_info->peninsula_nbytes > tcp->tcp_cached.initial_rcv_wnd);*/
2127*d14abf15SRobert Mustacchi gen_info->dont_send_to_system_more_then_rwin = FALSE;
2128*d14abf15SRobert Mustacchi if ((buff_cnt = (u16_t)d_list_entry_cnt(&gen_info->peninsula_list)) > gen_info->max_frag_count) {
2129*d14abf15SRobert Mustacchi lm_bd_chain_t *bd_chain = &tcp->rx_con->bd_chain;
2130*d14abf15SRobert Mustacchi u16_t possible_frag_count, decreased_count;
2131*d14abf15SRobert Mustacchi possible_frag_count = (/*bd_chain->page_cnt**/
2132*d14abf15SRobert Mustacchi LM_PAGE_SIZE - sizeof(lm_frag_list_t)) / sizeof(lm_frag_t);
2133*d14abf15SRobert Mustacchi DbgMessage(pdev, WARNl4rx | WARNl4sp,
2134*d14abf15SRobert Mustacchi "###lm_tcp_rx_get_buffered_data_from_terminate cid=%d: peninsula_list cnt (%d) > max frag_count (%d)\n",
2135*d14abf15SRobert Mustacchi tcp->cid, buff_cnt, gen_info->max_frag_count);
2136*d14abf15SRobert Mustacchi
2137*d14abf15SRobert Mustacchi if (possible_frag_count > gen_info->max_frag_count) {
2138*d14abf15SRobert Mustacchi /* This solution is ugly:
2139*d14abf15SRobert Mustacchi since there will not be any further buffered data indications to the client, we must be able to
2140*d14abf15SRobert Mustacchi indicate all the buffered data now. But the preallocated frag list in the rx con is too short!
2141*d14abf15SRobert Mustacchi So instead of the pre-allocated frag list we need to use a larger memory. Our options:
2142*d14abf15SRobert Mustacchi 1. allocate memory here and release it later.
2143*d14abf15SRobert Mustacchi 2. use other pre-allocated memory that is not in use anymore (e.g. the bd chain) [chosen solution]
2144*d14abf15SRobert Mustacchi In any case both solutions may fail: memory allocation can fail and the other pre-allocated memory
2145*d14abf15SRobert Mustacchi might also be too short. the fallback from this is:
2146*d14abf15SRobert Mustacchi - don't indicate anything and release the peninsula (NOT IMPLEMENTED)
2147*d14abf15SRobert Mustacchi DbgBreakIfAll((u16_t)(sizeof(lm_frag_list_t) + sizeof(lm_frag_t)*buff_cnt) > bd_chain->page_cnt*LM_PAGE_SIZE); */
2148*d14abf15SRobert Mustacchi if (possible_frag_count < buff_cnt) {
2149*d14abf15SRobert Mustacchi decreased_count = possible_frag_count;
2150*d14abf15SRobert Mustacchi DbgMessage(pdev, WARNl4rx | WARNl4sp,
2151*d14abf15SRobert Mustacchi "###lm_tcp_rx_get_buffered_data_from_terminate cid=%d: peninsula_list cnt (%d) > aux.frag_cnt (%d)\n",
2152*d14abf15SRobert Mustacchi tcp->cid, buff_cnt, possible_frag_count);
2153*d14abf15SRobert Mustacchi } else {
2154*d14abf15SRobert Mustacchi decreased_count = 0;
2155*d14abf15SRobert Mustacchi DbgMessage(pdev, WARNl4rx | WARNl4sp,
2156*d14abf15SRobert Mustacchi "###lm_tcp_rx_get_buffered_data_from_terminate cid=%d: aux.frag_cnt (%d) is enough for %d buffs\n",
2157*d14abf15SRobert Mustacchi tcp->cid, possible_frag_count, buff_cnt);
2158*d14abf15SRobert Mustacchi }
2159*d14abf15SRobert Mustacchi *frag_list = (lm_frag_list_t*)bd_chain->bd_chain_virt;
2160*d14abf15SRobert Mustacchi (*frag_list)->cnt = possible_frag_count;
2161*d14abf15SRobert Mustacchi (*frag_list)->size = 0;
2162*d14abf15SRobert Mustacchi } else {
2163*d14abf15SRobert Mustacchi decreased_count = (u16_t)gen_info->max_frag_count;
2164*d14abf15SRobert Mustacchi }
2165*d14abf15SRobert Mustacchi if (decreased_count) {
2166*d14abf15SRobert Mustacchi u16_t returned_buff_cnt = lm_squeeze_rx_buffer_list(pdev, tcp, decreased_count, &unwanted_gen_buf);
2167*d14abf15SRobert Mustacchi if (decreased_count < returned_buff_cnt) {
2168*d14abf15SRobert Mustacchi lm_frag_list_t* new_frag_list;
2169*d14abf15SRobert Mustacchi u32_t mem_size_for_new_frag_list = returned_buff_cnt * sizeof(lm_frag_t) + sizeof(lm_frag_list_t);
2170*d14abf15SRobert Mustacchi // new_frag_list = (lm_frag_list_t*)mm_alloc_mem(pdev, mem_size_for_new_frag_list, LM_RESOURCE_NDIS);
2171*d14abf15SRobert Mustacchi new_frag_list = (lm_frag_list_t*)mm_rt_alloc_mem(pdev, mem_size_for_new_frag_list, LM_RESOURCE_NDIS);
2172*d14abf15SRobert Mustacchi
2173*d14abf15SRobert Mustacchi if (new_frag_list != NULL) {
2174*d14abf15SRobert Mustacchi tcp->type_of_aux_memory = TCP_CON_AUX_RT_MEM;
2175*d14abf15SRobert Mustacchi tcp->aux_memory = new_frag_list;
2176*d14abf15SRobert Mustacchi tcp->aux_mem_size = mem_size_for_new_frag_list;
2177*d14abf15SRobert Mustacchi *frag_list = new_frag_list;
2178*d14abf15SRobert Mustacchi (*frag_list)->cnt = returned_buff_cnt;
2179*d14abf15SRobert Mustacchi (*frag_list)->size = 0;
2180*d14abf15SRobert Mustacchi tcp->aux_mem_flag = TCP_CON_AUX_RT_MEM_SUCCSESS_ALLOCATION;
2181*d14abf15SRobert Mustacchi } else {
2182*d14abf15SRobert Mustacchi /* No way. Let's send up only part of data. Data distortion is unavoidable.
2183*d14abf15SRobert Mustacchi TODO: prevent data distortion by termination the connection itself at least */
2184*d14abf15SRobert Mustacchi lm_status = LM_STATUS_RESOURCE;
2185*d14abf15SRobert Mustacchi tcp->aux_mem_flag = TCP_CON_AUX_RT_MEM_FAILED_ALLOCATION;
2186*d14abf15SRobert Mustacchi /* Get rid of whatever remains in the peninsula...add it to unwanted... */
2187*d14abf15SRobert Mustacchi if (unwanted_gen_buf)
2188*d14abf15SRobert Mustacchi {
2189*d14abf15SRobert Mustacchi temp_gen_buf = (lm_tcp_gen_buf_t*)d_list_peek_tail(&gen_info->peninsula_list);
2190*d14abf15SRobert Mustacchi if (temp_gen_buf)
2191*d14abf15SRobert Mustacchi {
2192*d14abf15SRobert Mustacchi temp_gen_buf->link.next = &(unwanted_gen_buf->link);
2193*d14abf15SRobert Mustacchi unwanted_gen_buf->link.prev = &(temp_gen_buf->link);
2194*d14abf15SRobert Mustacchi unwanted_gen_buf = (lm_tcp_gen_buf_t*)d_list_peek_head(&gen_info->peninsula_list);
2195*d14abf15SRobert Mustacchi }
2196*d14abf15SRobert Mustacchi }
2197*d14abf15SRobert Mustacchi else
2198*d14abf15SRobert Mustacchi {
2199*d14abf15SRobert Mustacchi unwanted_gen_buf = (lm_tcp_gen_buf_t*)d_list_peek_head(&gen_info->peninsula_list);
2200*d14abf15SRobert Mustacchi }
2201*d14abf15SRobert Mustacchi d_list_clear(&gen_info->peninsula_list);
2202*d14abf15SRobert Mustacchi
2203*d14abf15SRobert Mustacchi }
2204*d14abf15SRobert Mustacchi }
2205*d14abf15SRobert Mustacchi }
2206*d14abf15SRobert Mustacchi }
2207*d14abf15SRobert Mustacchi if (lm_status == LM_STATUS_SUCCESS)
2208*d14abf15SRobert Mustacchi {
2209*d14abf15SRobert Mustacchi _lm_tcp_rx_get_buffered_data(pdev, tcp, frag_list, gen_buf);
2210*d14abf15SRobert Mustacchi
2211*d14abf15SRobert Mustacchi /* for cleaness: lm_tcp_rx_buffered_data_indicated will not be called
2212*d14abf15SRobert Mustacchi * indication is 'succesfull' */
2213*d14abf15SRobert Mustacchi gen_info->num_bytes_indicated += (u32_t)(*frag_list)->size;
2214*d14abf15SRobert Mustacchi gen_info->first_buf_offset = 0;
2215*d14abf15SRobert Mustacchi gen_info->num_buffers_indicated += (*gen_buf)->ind_nbufs;
2216*d14abf15SRobert Mustacchi }
2217*d14abf15SRobert Mustacchi
2218*d14abf15SRobert Mustacchi gen_info->peninsula_blocked = TRUE;
2219*d14abf15SRobert Mustacchi
2220*d14abf15SRobert Mustacchi if (unwanted_gen_buf) {
2221*d14abf15SRobert Mustacchi lm_tcp_return_gen_bufs(pdev, tcp, unwanted_gen_buf,MM_TCP_RGB_COMPENSATE_GRQS, NON_EXISTENT_SB_IDX);
2222*d14abf15SRobert Mustacchi }
2223*d14abf15SRobert Mustacchi
2224*d14abf15SRobert Mustacchi if (*gen_buf) {
2225*d14abf15SRobert Mustacchi /* with data taken from terminate, we can always act as in 'short-loop' since the bytes for
2226*d14abf15SRobert Mustacchi * this connection won't increase the window anyway... */
2227*d14abf15SRobert Mustacchi (*gen_buf)->flags &= ~GEN_FLAG_SWS_UPDATE;
2228*d14abf15SRobert Mustacchi }
2229*d14abf15SRobert Mustacchi
2230*d14abf15SRobert Mustacchi return lm_status;
2231*d14abf15SRobert Mustacchi }
2232*d14abf15SRobert Mustacchi
lm_tcp_rx_get_buffered_data(struct _lm_device_t * pdev,lm_tcp_state_t * tcp,lm_frag_list_t ** frag_list,lm_tcp_gen_buf_t ** gen_buf)2233*d14abf15SRobert Mustacchi lm_status_t lm_tcp_rx_get_buffered_data(
2234*d14abf15SRobert Mustacchi struct _lm_device_t * pdev,
2235*d14abf15SRobert Mustacchi lm_tcp_state_t * tcp,
2236*d14abf15SRobert Mustacchi lm_frag_list_t ** frag_list,
2237*d14abf15SRobert Mustacchi lm_tcp_gen_buf_t ** gen_buf
2238*d14abf15SRobert Mustacchi )
2239*d14abf15SRobert Mustacchi {
2240*d14abf15SRobert Mustacchi lm_tcp_con_t * rx_con = tcp->rx_con;
2241*d14abf15SRobert Mustacchi lm_tcp_con_rx_gen_info_t * gen_info;
2242*d14abf15SRobert Mustacchi lm_status_t lm_status;
2243*d14abf15SRobert Mustacchi
2244*d14abf15SRobert Mustacchi DbgMessage(pdev, VERBOSEl4rx, "###lm_tcp_rx_get_buffered_data cid=%d\n", tcp->cid);
2245*d14abf15SRobert Mustacchi gen_info = &rx_con->u.rx.gen_info;
2246*d14abf15SRobert Mustacchi
2247*d14abf15SRobert Mustacchi DbgBreakIf(gen_info->peninsula_blocked == TRUE); /* terminate was already called */
2248*d14abf15SRobert Mustacchi
2249*d14abf15SRobert Mustacchi if (gen_info->peninsula_nbytes == 0 || (rx_con->flags & TCP_RX_IND_BLOCKED)) {
2250*d14abf15SRobert Mustacchi return LM_STATUS_FAILURE;
2251*d14abf15SRobert Mustacchi }
2252*d14abf15SRobert Mustacchi
2253*d14abf15SRobert Mustacchi *frag_list = NULL;
2254*d14abf15SRobert Mustacchi lm_status = _lm_tcp_rx_get_buffered_data(pdev, tcp, frag_list, gen_buf);
2255*d14abf15SRobert Mustacchi if (*gen_buf) {
2256*d14abf15SRobert Mustacchi if (gen_info->update_window_mode == LM_TOE_UPDATE_MODE_LONG_LOOP) {
2257*d14abf15SRobert Mustacchi gen_info->pending_indicated_bytes += (*gen_buf)->ind_bytes;
2258*d14abf15SRobert Mustacchi /* We need to increase the number of pending return indications here, since once we return
2259*d14abf15SRobert Mustacchi * we are basically pending for the return of this specific indication. There are two cases
2260*d14abf15SRobert Mustacchi * that require decreasing the pending return indications. The first is if the indication failed
2261*d14abf15SRobert Mustacchi * the second is if it succeeded AND the buffers returned... */
2262*d14abf15SRobert Mustacchi gen_info->pending_return_indications++;
2263*d14abf15SRobert Mustacchi (*gen_buf)->flags |= GEN_FLAG_SWS_UPDATE;
2264*d14abf15SRobert Mustacchi } else {
2265*d14abf15SRobert Mustacchi (*gen_buf)->flags &= ~GEN_FLAG_SWS_UPDATE;
2266*d14abf15SRobert Mustacchi }
2267*d14abf15SRobert Mustacchi }
2268*d14abf15SRobert Mustacchi
2269*d14abf15SRobert Mustacchi return LM_STATUS_SUCCESS;
2270*d14abf15SRobert Mustacchi }
2271*d14abf15SRobert Mustacchi
/** Description
 *  Completion handler for an indication of buffered (generic/peninsula) data to
 *  the client. Called in two modes:
 *   - gen_buf == NULL: the client accepted the whole indication; the generic
 *     buffers are already out of our hands.
 *   - gen_buf != NULL: complete rejection or partial acceptance; gen_buf is the
 *     head of the indicated generic-buffer chain, which is returned to our
 *     control and must be split between the pool (fully consumed buffers) and
 *     the head of the peninsula (unconsumed data).
 * Params
 *  pdev           - device handle
 *  tcp            - connection state the indication belongs to
 *  accepted_bytes - number of bytes the client took from the indication
 *  gen_buf        - head of the indicated buffer chain, or NULL on full success
 * Assumptions
 *  - terminate has not been called yet (peninsula is not blocked)
 *  - accepted_bytes <= gen_buf->ind_bytes when gen_buf != NULL
 */
void lm_tcp_rx_buffered_data_indicated(
    struct _lm_device_t * pdev,
    lm_tcp_state_t      * tcp,
    u32_t                 accepted_bytes,
    lm_tcp_gen_buf_t    * gen_buf /* head of indications generic buffer NULL if indication succeeded */
    )
{
    lm_tcp_con_rx_gen_info_t * gen_info = &tcp->rx_con->u.rx.gen_info;

    DbgMessage(pdev, VERBOSEl4rx , "###lm_tcp_rx_buffered_data_indicated accepted_bytes = %d cid=%d\n", accepted_bytes, tcp->cid);

    DbgBreakIf(gen_info->peninsula_blocked == TRUE); /* terminate was already called */

    /* Whatever the client accepted counts as consumed towards the FW-posted
     * consumed-generic counter, regardless of success/failure of the rest. */
    lm_tcp_incr_consumed_gen(pdev, tcp, accepted_bytes);
    gen_info->num_bytes_indicated += accepted_bytes;

    if (gen_buf == NULL) { /* successful indication */
        /* Entire chain consumed: next peninsula buffer starts at offset 0. */
        gen_info->first_buf_offset = 0;
        if (gen_info->update_window_mode == LM_TOE_UPDATE_MODE_SHORT_LOOP) {
            gen_info->add_sws_bytes += accepted_bytes;
        }
        gen_info->num_success_indicates++;
        gen_info->bytes_indicated_accepted += accepted_bytes;
        tcp->rx_con->u.rx.zero_byte_posted_during_ind = FALSE;
    } else { /* complete rejection / partial success, gen_buf remains in our control */
        /* indication failed */
        lm_tcp_gen_buf_t * curr_gen_buf, * ret_buf;
        d_list_t return_to_pool_list;
        d_list_t return_to_peninsula_list;
        u32_t nbytes;
        DbgBreakIf(accepted_bytes > gen_buf->ind_bytes);
        /* The unaccepted remainder goes back to being peninsula data. */
        gen_info->peninsula_nbytes += gen_buf->ind_bytes - accepted_bytes;

        gen_info->num_failed_indicates++;
        gen_info->bytes_indicated_accepted+= accepted_bytes;
        gen_info->bytes_indicated_rejected+= gen_buf->ind_bytes - accepted_bytes;

        DbgMessage(pdev, INFORMl4rx, "GENERIC: %s Indication for cid=%d accepted_bytes=%d\n",
                    (accepted_bytes == 0)? "Rejected" : "Partial", tcp->cid, accepted_bytes);

        d_list_init(&return_to_pool_list, NULL, NULL, 0);
        d_list_init(&return_to_peninsula_list, NULL, NULL, 0);

        /* Mark the connection as "indication rejected" so further indications
         * are held back — unless a zero-byte post raced with this indication,
         * in which case the rejection is expected and the flag stays clear. */
        DbgBreakIf(gen_buf->tcp->rx_con->flags & TCP_INDICATE_REJECTED);
        if (tcp->rx_con->u.rx.zero_byte_posted_during_ind) {
            tcp->rx_con->u.rx.zero_byte_posted_during_ind = FALSE;
        } else {
            gen_buf->tcp->rx_con->flags |= TCP_INDICATE_REJECTED;
        }

        curr_gen_buf = gen_buf;

        /* indicated bytes are in fact 'freed up' space: so we can make the sws_bytes larger,
         * this is always true here luxury-mode or not */
        gen_info->add_sws_bytes += accepted_bytes;

        /* buffer was returned to us so it is no longer pending return...if we increased the 'pending' we have
         * to decrease */
        if (gen_buf->flags & GEN_FLAG_SWS_UPDATE) {
            gen_info->pending_return_indications--;
            gen_info->pending_indicated_bytes-=gen_buf->ind_bytes;
        }
        mm_atomic_inc(&pdev->toe_info.stats.total_indicated_returned); /* stats */

        /* return buffers that were fully indicated to the generic pool, ones that we're not, to the peninsula */
        while (accepted_bytes) {
            /* Bytes remaining in the current buffer, honoring a partial start offset. */
            nbytes = ((lm_tcp_gen_buf_t *)curr_gen_buf)->placed_bytes - gen_info->first_buf_offset;
            if (accepted_bytes >= nbytes) {
                /* the buffer was completely accepted */
                accepted_bytes -= nbytes;
                ret_buf = curr_gen_buf;
                curr_gen_buf = NEXT_GEN_BUF(curr_gen_buf);
                d_list_push_tail(&return_to_pool_list, &ret_buf->link);
                gen_info->num_buffers_indicated++;
                gen_info->first_buf_offset = 0;
            } else {
                /* Partial acceptance inside this buffer: remember where the
                 * unconsumed data starts and stop walking. */
                gen_info->first_buf_offset += (u16_t)accepted_bytes;
                accepted_bytes = 0;
            }
        }

        /* is there anything to return to the peninsula ? (i.e. return_head moved)
         * Everything from curr_gen_buf onward was not consumed; clear its
         * indication accounting and queue it for re-insertion. */
        while (curr_gen_buf) {
            curr_gen_buf->ind_bytes = 0;
            curr_gen_buf->ind_nbufs = 0;
            ret_buf = curr_gen_buf;
            curr_gen_buf = NEXT_GEN_BUF(curr_gen_buf);
            gen_info->bufs_indicated_rejected++;
            d_list_push_tail(&return_to_peninsula_list, &ret_buf->link);
        }

        if (!d_list_is_empty(&return_to_pool_list)) {
            lm_tcp_return_list_of_gen_bufs(pdev, tcp, &return_to_pool_list, MM_TCP_RGB_COMPENSATE_GRQS, NON_EXISTENT_SB_IDX);
        }

        /* There must be at least something to return to the peninsula since this was partial indication */
        DbgBreakIf(d_list_is_empty(&return_to_peninsula_list));
        /* re-insert generic buffers to the peninsula.
         * we need to re-insert the buffers to the head of the peninsula */
        d_list_add_head(&gen_info->peninsula_list, &return_to_peninsula_list);

    }

}
2376*d14abf15SRobert Mustacchi
2377*d14abf15SRobert Mustacchi /** Description
2378*d14abf15SRobert Mustacchi * returns the buffers to the generic pool
2379*d14abf15SRobert Mustacchi */
/** Description
 *  Returns a chain of generic buffers to the generic pool. In debug builds the
 *  chain is walked first to verify the begin/end signatures of every buffer and
 *  to record the chain length in the head's ind_nbufs (debug aid only).
 */
void lm_tcp_return_gen_bufs(struct _lm_device_t * pdev, lm_tcp_state_t * tcp, lm_tcp_gen_buf_t * gen_buf,u32_t flags, u8_t grq_idx)
{
#if DBG
    lm_tcp_gen_buf_t * cur;

    gen_buf->ind_nbufs = 0; /* for debugging purposes will count how many buffers are in our list */
    for (cur = gen_buf; cur != NULL; cur = NEXT_GEN_BUF(cur)) {
        /* Validate that neither end of the buffer was overwritten. */
        DbgBreakIf(SIG(cur->buf_virt) != L4GEN_BUFFER_SIG);
        DbgBreakIf(END_SIG(cur->buf_virt, LM_TCP_GEN_BUF_SIZE(pdev)) != L4GEN_BUFFER_SIG_END);
        gen_buf->ind_nbufs++;
    }
#endif

    mm_tcp_return_gen_bufs(pdev, gen_buf,flags,grq_idx);
}
2398*d14abf15SRobert Mustacchi
2399*d14abf15SRobert Mustacchi /** Description
2400*d14abf15SRobert Mustacchi * returns the buffers to the generic pool
2401*d14abf15SRobert Mustacchi */
/** Description
 *  Returns a d_list of generic buffers to the generic pool. In debug builds the
 *  list head's chain is walked first to verify the begin/end signatures of each
 *  buffer and to record the chain length in the head's ind_nbufs (debug aid).
 *
 *  Fix: the original debug path dereferenced the list head unconditionally, so
 *  calling this with an empty list NULL-dereferenced in DBG builds (callers in
 *  this file do guard with d_list_is_empty, but the function itself did not).
 */
void lm_tcp_return_list_of_gen_bufs(struct _lm_device_t * pdev, lm_tcp_state_t * tcp, d_list_t * gen_buf_list,u32_t flags, u8_t grq_idx)
{
    lm_tcp_gen_buf_t * gen_buf = (lm_tcp_gen_buf_t *)d_list_peek_head(gen_buf_list);

#if DBG
    if (gen_buf != NULL) { /* guard: empty list has no head to validate */
        lm_tcp_gen_buf_t * curr_gen_buf;

        gen_buf->ind_nbufs = 0; /* for debugging purposes will count how many buffers are in our list */
        for (curr_gen_buf = gen_buf; curr_gen_buf != NULL; curr_gen_buf = NEXT_GEN_BUF(curr_gen_buf)) {
            /* Validate that neither end of the buffer was overwritten. */
            DbgBreakIf(SIG(curr_gen_buf->buf_virt) != L4GEN_BUFFER_SIG);
            DbgBreakIf(END_SIG(curr_gen_buf->buf_virt, LM_TCP_GEN_BUF_SIZE(pdev)) != L4GEN_BUFFER_SIG_END);
            gen_buf->ind_nbufs++;
        }
    }
#endif

    mm_tcp_return_list_of_gen_bufs(pdev, gen_buf_list,flags,grq_idx);
}
2421*d14abf15SRobert Mustacchi
/** Description
 *  Called when the client returns ownership of a previously indicated generic
 *  buffer chain that was marked GEN_FLAG_SWS_UPDATE. Decrements the pending
 *  return accounting, posts a sliding-window update for the indicated bytes,
 *  and hands the buffers back to the generic pool.
 */
void lm_tcp_rx_indication_returned(struct _lm_device_t *pdev, lm_tcp_state_t * tcp, lm_tcp_gen_buf_t * gen_buf)
{
    lm_tcp_con_rx_gen_info_t * gen_info = &tcp->rx_con->u.rx.gen_info;

    DbgMessage(pdev, VERBOSEl4rx, "###lm_tcp_rx_con_indication_returned cid=%d\n", tcp->cid);

    /* Sanity: the buffer must belong to this connection and the cid must still
     * resolve to it. */
    DbgBreakIf(tcp != gen_buf->tcp);
    DbgBreakIf(tcp->cid && (tcp != lm_cid_cookie(pdev, TOE_CONNECTION_TYPE, tcp->cid)));

    /* TBA fix in case of RcvIndicateSize > 0 */
    DbgBreakIf(gen_buf->refcnt != 0);

    /* Update the sws bytes according to the ind number of bytes this function is only called if in fact
     * this is a buffer that is marked as an 'update buffer' otherwise this function isn't called. */
    DbgBreakIfAll(!(gen_buf->flags & GEN_FLAG_SWS_UPDATE));

    gen_info->pending_return_indications--;
    gen_info->pending_indicated_bytes -= gen_buf->ind_bytes;

    lm_tcp_rx_post_sws(pdev, tcp, tcp->rx_con, gen_buf->ind_bytes, TCP_RX_POST_SWS_INC);
    lm_tcp_return_gen_bufs(pdev, tcp, gen_buf, MM_TCP_RGB_COMPENSATE_GRQS, NON_EXISTENT_SB_IDX);
}
2441*d14abf15SRobert Mustacchi
/** Description
 *  Decides whether a connection can be considered "dead" (safe to finally
 *  destroy). If called from the upload-completion path, first transitions the
 *  state to UPLOAD_DONE. A connection is dead only when it is not held by a
 *  deferred completion, has no indications pending return from the client, and
 *  its upload has completed.
 * Returns
 *  TRUE if the connection is dead, FALSE otherwise.
 */
u8_t lm_tcp_is_tcp_dead(struct _lm_device_t * pdev, lm_tcp_state_t * tcp, u8_t op)
{
    lm_tcp_con_t * rx_con = tcp->rx_con;

    UNREFERENCED_PARAMETER_(pdev);

    if (op == TCP_IS_DEAD_OP_UPLD_COMP) {
        DbgBreakIf(tcp->hdr.status != STATE_STATUS_UPLOAD_PENDING);
        tcp->hdr.status = STATE_STATUS_UPLOAD_DONE;
    }

    if (GET_FLAGS(rx_con->flags, TCP_COMP_DEFERRED)) {
        /* we can't kill the connection here! it's still being handled by deferred function which will
         * access it... killing will be done from that context... */
        return FALSE;
    }

    if (rx_con->u.rx.gen_info.pending_return_indications != 0) {
        /* client still owns indicated buffers - not dead yet */
        return FALSE;
    }

    /* If the function is called from offload completion flow, we might have completions on the RCQ
       that we haven't processed yet so haven't completed / indicated bufs,
       so there are bytes in the peninsula and this state is legal */
    DbgBreakIf(!(rx_con->flags & TCP_RX_IND_BLOCKED) &&
               (rx_con->u.rx.gen_info.peninsula_nbytes != 0) &&
               (op != TCP_IS_DEAD_OP_OFLD_COMP_DFRD));

    return (tcp->hdr.status == STATE_STATUS_UPLOAD_DONE) ? TRUE : FALSE;
}
2468*d14abf15SRobert Mustacchi
lm_tcp_con_status(struct _lm_device_t * pdev,lm_tcp_con_t * rx_con)2469*d14abf15SRobert Mustacchi lm_status_t lm_tcp_con_status(struct _lm_device_t * pdev, lm_tcp_con_t * rx_con)
2470*d14abf15SRobert Mustacchi {
2471*d14abf15SRobert Mustacchi UNREFERENCED_PARAMETER_(pdev);
2472*d14abf15SRobert Mustacchi
2473*d14abf15SRobert Mustacchi if (rx_con->flags & TCP_RX_POST_BLOCKED) {
2474*d14abf15SRobert Mustacchi return LM_STATUS_CONNECTION_CLOSED;
2475*d14abf15SRobert Mustacchi }
2476*d14abf15SRobert Mustacchi return LM_STATUS_SUCCESS;
2477*d14abf15SRobert Mustacchi }
2478*d14abf15SRobert Mustacchi
lm_tcp_calc_gen_buf_size(struct _lm_device_t * pdev)2479*d14abf15SRobert Mustacchi u32_t lm_tcp_calc_gen_buf_size(struct _lm_device_t * pdev)
2480*d14abf15SRobert Mustacchi {
2481*d14abf15SRobert Mustacchi u32_t gen_buf_size = 0;
2482*d14abf15SRobert Mustacchi u32_t const chain_idx = LM_SW_LEADING_RSS_CID(pdev);
2483*d14abf15SRobert Mustacchi
2484*d14abf15SRobert Mustacchi /* determine size of buffer: in steps of pages, larger than the minimum and
2485*d14abf15SRobert Mustacchi * the mtu */
2486*d14abf15SRobert Mustacchi if(CHK_NULL(pdev) ||
2487*d14abf15SRobert Mustacchi ERR_IF((ARRSIZE(pdev->params.l2_cli_con_params) <= chain_idx) ||
2488*d14abf15SRobert Mustacchi (CHIP_IS_E1H(pdev) && (chain_idx >= ETH_MAX_RX_CLIENTS_E1H)) || /* TODO E2 add IS_E2*/
2489*d14abf15SRobert Mustacchi (CHIP_IS_E1(pdev) && (chain_idx >= ETH_MAX_RX_CLIENTS_E1)) ))
2490*d14abf15SRobert Mustacchi {
2491*d14abf15SRobert Mustacchi DbgBreakIf(1);
2492*d14abf15SRobert Mustacchi return 0;
2493*d14abf15SRobert Mustacchi }
2494*d14abf15SRobert Mustacchi
2495*d14abf15SRobert Mustacchi if (pdev->params.l4_gen_buf_size < pdev->params.l2_cli_con_params[chain_idx].mtu)
2496*d14abf15SRobert Mustacchi {
2497*d14abf15SRobert Mustacchi gen_buf_size = pdev->params.l2_cli_con_params[chain_idx].mtu;
2498*d14abf15SRobert Mustacchi
2499*d14abf15SRobert Mustacchi }
2500*d14abf15SRobert Mustacchi else
2501*d14abf15SRobert Mustacchi {
2502*d14abf15SRobert Mustacchi gen_buf_size = pdev->params.l4_gen_buf_size;
2503*d14abf15SRobert Mustacchi }
2504*d14abf15SRobert Mustacchi /* bring to page-size boundary */
2505*d14abf15SRobert Mustacchi gen_buf_size = (gen_buf_size + (LM_PAGE_SIZE-1)) & ~(LM_PAGE_SIZE-1);
2506*d14abf15SRobert Mustacchi
2507*d14abf15SRobert Mustacchi return gen_buf_size;
2508*d14abf15SRobert Mustacchi }
2509*d14abf15SRobert Mustacchi
/** Description
 *  Compacts the peninsula buffer list down to at most 'adjust_number' buffers
 *  by copying data from later buffers into the free tail space of earlier
 *  ones. Buffers that are fully drained this way are unlinked from the
 *  peninsula and handed back through 'unwanted_gen_buf' (head of a list the
 *  caller must return to the pool). If a first pass without splitting buffers
 *  is not enough, a second pass ('force_buffer_division') also shifts partial
 *  buffer contents, first normalizing the head buffer to offset 0.
 * Params
 *  pdev             - device handle
 *  tcp              - connection whose peninsula is being squeezed
 *  adjust_number    - target maximum number of buffers (e.g. frag count)
 *  unwanted_gen_buf - out: head of the list of emptied buffers (NULL if none)
 * Returns
 *  The number of buffers remaining in the peninsula list (may still be larger
 *  than adjust_number if the data could not fit).
 */
u16_t lm_squeeze_rx_buffer_list(
    struct _lm_device_t * pdev,
    lm_tcp_state_t      * tcp,
    u16_t                 adjust_number,
    lm_tcp_gen_buf_t   ** unwanted_gen_buf
    )
{
    u32_t                      gen_buff_size      = lm_tcp_calc_gen_buf_size(pdev);
    lm_tcp_con_t             * rx_con             = tcp->rx_con;
    lm_tcp_con_rx_gen_info_t * gen_info           = &rx_con->u.rx.gen_info;
    d_list_t                   unwanted_list      = {0};
    lm_tcp_gen_buf_t         * gen_buf_copy_to    = NULL;
    lm_tcp_gen_buf_t         * gen_buf_copy_from  = NULL, *next_buffer = NULL;
    u16_t                      free_bytes_to_copy = 0, bytes_to_copy = 0, gen_buf_offset = 0;
    u8_t                       force_buffer_division = FALSE;
    u16_t                      buffers_number     = (u16_t)d_list_entry_cnt(&gen_info->peninsula_list);

    *unwanted_gen_buf = NULL;

    /* Only attempt the squeeze if the target buffer count can possibly hold
     * all peninsula bytes. */
    if ((adjust_number * gen_buff_size) >= gen_info->peninsula_nbytes) {
        d_list_init(&unwanted_list, NULL, NULL, 0);
        gen_buf_copy_to = (lm_tcp_gen_buf_t*)d_list_peek_head(&gen_info->peninsula_list);
        next_buffer = NEXT_GEN_BUF(gen_buf_copy_to);
        free_bytes_to_copy =  gen_buff_size - gen_buf_copy_to->placed_bytes;
        while (buffers_number > adjust_number) {
            gen_buf_copy_from = next_buffer;
            if (gen_buf_copy_from != NULL) {
                next_buffer = NEXT_GEN_BUF(gen_buf_copy_from);
                bytes_to_copy = gen_buf_copy_from->placed_bytes;
                if (bytes_to_copy <= free_bytes_to_copy) {
                    /* The whole source buffer fits into the destination's free
                     * tail: absorb it and move the drained buffer to the
                     * unwanted list. */
                    mm_memcpy(gen_buf_copy_to->buf_virt + gen_buf_copy_to->placed_bytes,
                               gen_buf_copy_from->buf_virt, bytes_to_copy);
                    free_bytes_to_copy -= bytes_to_copy;
                    gen_buf_copy_to->placed_bytes += bytes_to_copy;
                    d_list_remove_entry(&gen_info->peninsula_list, &gen_buf_copy_from->link);
                    d_list_push_tail(&unwanted_list, &gen_buf_copy_from->link);
                    buffers_number--;
                    continue;
                } else {
                    if (force_buffer_division) {
                        /* Second pass: split the source buffer, topping up the
                         * destination and shifting the remainder of the source
                         * down to its start. */
                        if (free_bytes_to_copy) {
                            mm_memcpy(gen_buf_copy_to->buf_virt + gen_buf_copy_to->placed_bytes,
                                      gen_buf_copy_from->buf_virt, free_bytes_to_copy);
                            gen_buf_copy_to->placed_bytes += free_bytes_to_copy;
                            /* NOTE(review): source and destination here are
                             * overlapping ranges of the same buffer whenever
                             * bytes_to_copy > 2*free_bytes_to_copy; this is
                             * only safe if mm_memcpy handles forward overlap
                             * (memmove semantics) — confirm. */
                            mm_memcpy(gen_buf_copy_from->buf_virt,
                                      gen_buf_copy_from->buf_virt + free_bytes_to_copy, bytes_to_copy - free_bytes_to_copy);
                            gen_buf_copy_from->placed_bytes -= free_bytes_to_copy;
                        }
                    }
                    /* Destination is full; the current source becomes the new
                     * destination. */
                    gen_buf_copy_to = gen_buf_copy_from;
                    next_buffer = NEXT_GEN_BUF(gen_buf_copy_from);
                    free_bytes_to_copy = gen_buff_size - gen_buf_copy_to->placed_bytes;
                    continue;
                }
            } else {
                if (!force_buffer_division) {
                    /* First pass exhausted without reaching the target: restart
                     * from the head with buffer division enabled. */
                    force_buffer_division = TRUE;
                    gen_buf_copy_to = (lm_tcp_gen_buf_t*)d_list_peek_head(&gen_info->peninsula_list);
                    next_buffer = NEXT_GEN_BUF(gen_buf_copy_to);
                    gen_buf_offset = gen_info->first_buf_offset;
                    if (gen_buf_offset) {
                        /* move to start of buffer*/
                        /* NOTE(review): same overlapping-copy caveat as above
                         * when placed_bytes - offset > offset — confirm
                         * mm_memcpy overlap semantics. */
                        mm_memcpy(gen_buf_copy_to->buf_virt,
                                  gen_buf_copy_to->buf_virt + gen_buf_offset, gen_buf_copy_to->placed_bytes - gen_buf_offset);
                        gen_buf_copy_to->placed_bytes -= gen_buf_offset;
                        gen_buf_offset = gen_info->first_buf_offset = 0;
                    }
                    free_bytes_to_copy = gen_buff_size - gen_buf_copy_to->placed_bytes;
                    continue;
                } else {
                    /* Even with division the target could not be reached;
                     * give up and report the remaining count. */
                    DbgMessage(pdev, WARNl4rx | WARNl4sp,
                                "###lm_squeeze_rx_buffer_list cid=%d: peninsula_list cnt (%d) is still more frag_count (%d)\n",
                                tcp->cid, buffers_number, adjust_number);
                    break;
                }
            }
        }
        *unwanted_gen_buf = (lm_tcp_gen_buf_t*)d_list_peek_head(&unwanted_list);
        DbgMessage(pdev, WARNl4rx | WARNl4sp,
                    "###lm_squeeze_rx_buffer_list cid=%d(%d,%d,%d): peninsula_list cnt is decreased till %d\n",
                    tcp->cid, tcp->tcp_cached.initial_rcv_wnd, tcp->tcp_cached.rcv_indication_size, gen_buff_size, buffers_number);
    } else {
        DbgMessage(pdev, WARNl4rx | WARNl4sp,
                    "###lm_squeeze_rx_buffer_list cid=%d(%d,%d): could not replace %dB (%d bufs) into %d frags of %dB each\n",
                    tcp->cid, tcp->tcp_cached.initial_rcv_wnd, tcp->tcp_cached.rcv_indication_size,
                    gen_info->peninsula_nbytes, buffers_number, adjust_number, gen_buff_size);
    }
    return buffers_number;
}
2599*d14abf15SRobert Mustacchi
/** Description
 *  Removes all isles from the connection's rx generic info, collecting the
 *  removed generic buffers at the head of the caller-supplied 'isles_list'.
 *  Loops until the isle list is empty since a single remove call may not
 *  clear everything.
 */
void lm_tcp_rx_clear_isles(struct _lm_device_t * pdev, lm_tcp_state_t * tcp_state, d_list_t * isles_list)
{
    lm_tcp_con_rx_gen_info_t * gen_info;

    DbgBreakIf(!(tcp_state && tcp_state->rx_con));
    gen_info = &tcp_state->rx_con->u.rx.gen_info;

    for (;;) {
        u8_t     isle_cnt = (u8_t)d_list_entry_cnt(&gen_info->isles_list);
        d_list_t removed_bufs;

        if (isle_cnt == 0) {
            break;
        }
        d_list_init(&removed_bufs, NULL, NULL, 0);
        _lm_tcp_isle_remove(pdev, tcp_state, NON_EXISTENT_SB_IDX, isle_cnt, &removed_bufs);
        if (!d_list_is_empty(&removed_bufs)) {
            d_list_add_head(isles_list, &removed_bufs);
        }
    }
}
2617*d14abf15SRobert Mustacchi
2618