/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#include "bna.h"
#include "bfi.h"

/*
 * IB
 */
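/* Program the doorbell-ack word so that subsequent interrupt
 * acknowledgments re-arm the IB with the new coalescing timeout.
 */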
static void
bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
{
	ib->coalescing_timeo = coalescing_timeo;
	ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
				(u32)ib->coalescing_timeo, 0);
}

/*
 * RXF
 */

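/* Soft-reset helpers: mark all VLAN filter blocks, VLAN stripping and
 * (if enabled) the RSS configuration as pending again, so they are
 * replayed to the firmware on the next config-apply pass.
 */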
#define bna_rxf_vlan_cfg_soft_reset(rxf)				\
do {									\
	(rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;		\
	(rxf)->vlan_strip_pending = true;				\
} while (0)

#define bna_rxf_rss_cfg_soft_reset(rxf)					\
do {									\
	if ((rxf)->rss_status == BNA_STATUS_T_ENABLED)			\
		(rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING |		\
				BNA_RSS_F_CFG_PENDING |			\
				BNA_RSS_F_STATUS_PENDING);		\
} while (0)

static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
static int bna_rxf_fltr_clear(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
		enum bna_cleanup_type cleanup);
static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
		enum bna_cleanup_type cleanup);
static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
		enum bna_cleanup_type cleanup);

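/* RXF state machine: stopped -> cfg_wait -> started in the normal case;
 * paused and fltr_clr_wait handle pause/resume, and last_resp_wait
 * absorbs the firmware response still outstanding when a stop arrives.
 */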
bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, paused, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, fltr_clr_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
			enum bna_rxf_event);

static void
bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
{
	call_rxf_stop_cbfn(rxf);
}

static void
bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_START:
		if (rxf->flags & BNA_RXF_F_PAUSED) {
			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
			call_rxf_start_cbfn(rxf);
		} else
			bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;

	case RXF_E_STOP:
		call_rxf_stop_cbfn(rxf);
		break;

	case RXF_E_FAIL:
		/* No-op */
		break;

	case RXF_E_CONFIG:
		call_rxf_cam_fltr_cbfn(rxf);
		break;

	case RXF_E_PAUSE:
		rxf->flags |= BNA_RXF_F_PAUSED;
		call_rxf_pause_cbfn(rxf);
		break;

	case RXF_E_RESUME:
		rxf->flags &= ~BNA_RXF_F_PAUSED;
		call_rxf_resume_cbfn(rxf);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_paused_entry(struct bna_rxf *rxf)
{
	call_rxf_pause_cbfn(rxf);
}

static void
bna_rxf_sm_paused(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
	case RXF_E_FAIL:
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CONFIG:
		call_rxf_cam_fltr_cbfn(rxf);
		break;

	case RXF_E_RESUME:
		rxf->flags &= ~BNA_RXF_F_PAUSED;
		bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
{
	if (!bna_rxf_cfg_apply(rxf)) {
		/* No more pending config updates */
		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
	}
}

static void
bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
		bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait);
		break;

	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		call_rxf_start_cbfn(rxf);
		call_rxf_cam_fltr_cbfn(rxf);
		call_rxf_resume_cbfn(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CONFIG:
		/* No-op */
		break;

	case RXF_E_PAUSE:
		rxf->flags |= BNA_RXF_F_PAUSED;
		call_rxf_start_cbfn(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
		break;

	case RXF_E_FW_RESP:
		if (!bna_rxf_cfg_apply(rxf)) {
			/* No more pending config updates */
			bfa_fsm_set_state(rxf, bna_rxf_sm_started);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_started_entry(struct bna_rxf *rxf)
{
	call_rxf_start_cbfn(rxf);
	call_rxf_cam_fltr_cbfn(rxf);
	call_rxf_resume_cbfn(rxf);
}

static void
bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CONFIG:
		bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;

	case RXF_E_PAUSE:
		rxf->flags |= BNA_RXF_F_PAUSED;
		if (!bna_rxf_fltr_clear(rxf))
			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
		else
			bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_fltr_clr_wait_entry(struct bna_rxf *rxf)
{
}

static void
bna_rxf_sm_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		call_rxf_pause_cbfn(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_FW_RESP:
		if (!bna_rxf_fltr_clear(rxf)) {
			/* No more pending CAM entries to clear */
			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)
{
}

static void
bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
	case RXF_E_FW_RESP:
		bna_rxf_cfg_reset(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

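/* BFI request helpers: each builds its command in the preallocated
 * rxf->bfi_enet_cmd area and posts it on the enet message queue. The
 * firmware completion arrives later as an RXF_E_FW_RESP event.
 */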
static void
bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
		enum bfi_enet_h2i_msgs req_type)
{
	struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
	memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_ucast_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
{
	struct bfi_enet_mcast_add_req *req =
		&rxf->bfi_enet_cmd.mcast_add_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
		0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
	memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_mcast_add_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle)
{
	struct bfi_enet_mcast_del_req *req =
		&rxf->bfi_enet_cmd.mcast_del_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
		0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req)));
	req->handle = htons(handle);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_mcast_del_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx)
{
	struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;
	int i;
	int j;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req)));
	req->block_idx = block_idx;
	for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) {
		j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i;
		if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED)
			req->bit_mask[i] =
				htonl(rxf->vlan_filter_table[j]);
		else
			req->bit_mask[i] = 0xFFFFFFFF;
	}
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rx_vlan_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_vlan_strip_enable(struct bna_rxf *rxf)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = rxf->vlan_strip_status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rit_cfg(struct bna_rxf *rxf)
{
	struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req)));
	req->size = htons(rxf->rit_size);
	memcpy(&req->table[0], rxf->rit, rxf->rit_size);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rit_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rss_cfg(struct bna_rxf *rxf)
{
	struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;
	int i;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req)));
	req->cfg.type = rxf->rss_cfg.hash_type;
	req->cfg.mask = rxf->rss_cfg.hash_mask;
	for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++)
		req->cfg.key[i] =
			htonl(rxf->rss_cfg.toeplitz_hash_key[i]);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rss_cfg_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rss_enable(struct bna_rxf *rxf)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = rxf->rss_status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

/* This function gets the multicast MAC that has already been added to CAM */
static struct bna_mac *
bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
{
	struct bna_mac *mac;
	struct list_head *qe;

	list_for_each(qe, &rxf->mcast_active_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
			return mac;
	}

	list_for_each(qe, &rxf->mcast_pending_del_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
			return mac;
	}

	return NULL;
}

static struct bna_mcam_handle *
bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
{
	struct bna_mcam_handle *mchandle;
	struct list_head *qe;

	list_for_each(qe, &rxf->mcast_handle_q) {
		mchandle = (struct bna_mcam_handle *)qe;
		if (mchandle->handle == handle)
			return mchandle;
	}

	return NULL;
}

static void
bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle)
{
	struct bna_mac *mcmac;
	struct bna_mcam_handle *mchandle;

	mcmac = bna_rxf_mcmac_get(rxf, mac_addr);
	mchandle = bna_rxf_mchandle_get(rxf, handle);
	if (mchandle == NULL) {
		mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod);
		mchandle->handle = handle;
		mchandle->refcnt = 0;
		list_add_tail(&mchandle->qe, &rxf->mcast_handle_q);
	}
	mchandle->refcnt++;
	mcmac->handle = mchandle;
}

static int
bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
		enum bna_cleanup_type cleanup)
{
	struct bna_mcam_handle *mchandle;
	int ret = 0;

	mchandle = mac->handle;
	if (mchandle == NULL)
		return ret;

	mchandle->refcnt--;
	if (mchandle->refcnt == 0) {
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_del_req(rxf, mchandle->handle);
			ret = 1;
		}
		list_del(&mchandle->qe);
		bfa_q_qe_init(&mchandle->qe);
		bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
	}
	mac->handle = NULL;

	return ret;
}

static int
bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;
	int ret;
	/* Delete multicast entries previously added */
	while (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
		if (ret)
			return ret;
	}

	/* Add multicast entries */
	if (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		list_add_tail(&mac->qe, &rxf->mcast_active_q);
		bna_bfi_mcast_add_req(rxf, mac);
		return 1;
	}

	return 0;
}

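/* Send the VLAN filter bitmap for the lowest-numbered block that is
 * still marked pending; at most one block per firmware request.
 */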
static int
bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
{
	u8 vlan_pending_bitmask;
	int block_idx = 0;

	if (rxf->vlan_pending_bitmask) {
		vlan_pending_bitmask = rxf->vlan_pending_bitmask;
		while (!(vlan_pending_bitmask & 0x1)) {
			block_idx++;
			vlan_pending_bitmask >>= 1;
		}
		rxf->vlan_pending_bitmask &= ~(1 << block_idx);
		bna_bfi_rx_vlan_filter_set(rxf, block_idx);
		return 1;
	}

	return 0;
}

static int
bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct list_head *qe;
	struct bna_mac *mac;
	int ret;

	/* Throw away delete pending mcast entries */
	while (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		ret = bna_rxf_mcast_del(rxf, mac, cleanup);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
		if (ret)
			return ret;
	}

	/* Move active mcast entries to pending_add_q */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		bfa_q_qe_init(qe);
		list_add_tail(qe, &rxf->mcast_pending_add_q);
		mac = (struct bna_mac *)qe;
		if (bna_rxf_mcast_del(rxf, mac, cleanup))
			return 1;
	}

	return 0;
}

static int
bna_rxf_rss_cfg_apply(struct bna_rxf *rxf)
{
	if (rxf->rss_pending) {
		if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING;
			bna_bfi_rit_cfg(rxf);
			return 1;
		}

		if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING;
			bna_bfi_rss_cfg(rxf);
			return 1;
		}

		if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING;
			bna_bfi_rss_enable(rxf);
			return 1;
		}
	}

	return 0;
}

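/* Apply at most one pending config change per call. Returns 1 when a
 * firmware request was posted (the caller then waits for RXF_E_FW_RESP
 * before re-invoking), 0 when nothing is left pending.
 */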
static int
bna_rxf_cfg_apply(struct bna_rxf *rxf)
{
	if (bna_rxf_ucast_cfg_apply(rxf))
		return 1;

	if (bna_rxf_mcast_cfg_apply(rxf))
		return 1;

	if (bna_rxf_promisc_cfg_apply(rxf))
		return 1;

	if (bna_rxf_allmulti_cfg_apply(rxf))
		return 1;

	if (bna_rxf_vlan_cfg_apply(rxf))
		return 1;

	if (bna_rxf_vlan_strip_cfg_apply(rxf))
		return 1;

	if (bna_rxf_rss_cfg_apply(rxf))
		return 1;

	return 0;
}

/* Clear the CAM filters in hardware (hard cleanup), one firmware request
 * per call; returns 1 while requests remain outstanding.
 */
static int
bna_rxf_fltr_clear(struct bna_rxf *rxf)
{
	if (bna_rxf_ucast_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	if (bna_rxf_mcast_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	if (bna_rxf_promisc_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	if (bna_rxf_allmulti_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	return 0;
}

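/* Software-only reset: reschedule the active configuration as pending
 * without posting any firmware requests.
 */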
static void
bna_rxf_cfg_reset(struct bna_rxf *rxf)
{
	bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_vlan_cfg_soft_reset(rxf);
	bna_rxf_rss_cfg_soft_reset(rxf);
}

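/* Build the RSS indirection table from the CQ id of each RX path */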
static void
bna_rit_init(struct bna_rxf *rxf, int rit_size)
{
	struct bna_rx *rx = rxf->rx;
	struct bna_rxp *rxp;
	struct list_head *qe;
	int offset = 0;

	rxf->rit_size = rit_size;
	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		rxf->rit[offset] = rxp->cq.ccb->id;
		offset++;
	}
}

void
bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}

void
bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
		struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_mcast_add_req *req =
		&rxf->bfi_enet_cmd.mcast_add_req;
	struct bfi_enet_mcast_add_rsp *rsp =
		(struct bfi_enet_mcast_add_rsp *)msghdr;

	bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
			ntohs(rsp->handle));
	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}

static void
bna_rxf_init(struct bna_rxf *rxf,
		struct bna_rx *rx,
		struct bna_rx_config *q_config,
		struct bna_res_info *res_info)
{
	rxf->rx = rx;

	INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
	INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
	rxf->ucast_pending_set = 0;
	rxf->ucast_active_set = 0;
	INIT_LIST_HEAD(&rxf->ucast_active_q);
	rxf->ucast_pending_mac = NULL;

	INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
	INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
	INIT_LIST_HEAD(&rxf->mcast_active_q);
	INIT_LIST_HEAD(&rxf->mcast_handle_q);

	if (q_config->paused)
		rxf->flags |= BNA_RXF_F_PAUSED;

	rxf->rit = (u8 *)
		res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
	bna_rit_init(rxf, q_config->num_paths);

	rxf->rss_status = q_config->rss_status;
	if (rxf->rss_status == BNA_STATUS_T_ENABLED) {
		rxf->rss_cfg = q_config->rss_config;
		rxf->rss_pending |= BNA_RSS_F_CFG_PENDING;
		rxf->rss_pending |= BNA_RSS_F_RIT_PENDING;
		rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING;
	}

	rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
	memset(rxf->vlan_filter_table, 0,
			(sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32)));
	rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */
	rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;

	rxf->vlan_strip_status = q_config->vlan_strip_status;

	bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
}

static void
bna_rxf_uninit(struct bna_rxf *rxf)
{
	struct bna_mac *mac;

	rxf->ucast_pending_set = 0;
	rxf->ucast_active_set = 0;

	while (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
		bfa_q_qe_init(&mac->qe);
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
	}

	if (rxf->ucast_pending_mac) {
		bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
			rxf->ucast_pending_mac);
		rxf->ucast_pending_mac = NULL;
	}

	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
		bfa_q_qe_init(&mac->qe);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
	}

	rxf->rxmode_pending = 0;
	rxf->rxmode_pending_bitmask = 0;
	if (rxf->rx->bna->promisc_rid == rxf->rx->rid)
		rxf->rx->bna->promisc_rid = BFI_INVALID_RID;
	if (rxf->rx->bna->default_mode_rid == rxf->rx->rid)
		rxf->rx->bna->default_mode_rid = BFI_INVALID_RID;

	rxf->rss_pending = 0;
	rxf->vlan_strip_pending = false;

	rxf->flags = 0;

	rxf->rx = NULL;
}

static void
bna_rx_cb_rxf_started(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
}

static void
bna_rxf_start(struct bna_rxf *rxf)
{
	rxf->start_cbfn = bna_rx_cb_rxf_started;
	rxf->start_cbarg = rxf->rx;
	bfa_fsm_send_event(rxf, RXF_E_START);
}

static void
bna_rx_cb_rxf_stopped(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
}

static void
bna_rxf_stop(struct bna_rxf *rxf)
{
	rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
	rxf->stop_cbarg = rxf->rx;
	bfa_fsm_send_event(rxf, RXF_E_STOP);
}

static void
bna_rxf_fail(struct bna_rxf *rxf)
{
	bfa_fsm_send_event(rxf, RXF_E_FAIL);
}

enum bna_cb_status
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
		void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->ucast_pending_mac == NULL) {
		rxf->ucast_pending_mac =
			bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
		if (rxf->ucast_pending_mac == NULL)
			return BNA_CB_UCAST_CAM_FULL;
		bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
	}

	memcpy(rxf->ucast_pending_mac->addr, ucmac, ETH_ALEN);
	rxf->ucast_pending_set = 1;
	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;
}

enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
		void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct bna_mac *mac;

	/* Check if already added or pending addition */
	if (bna_mac_find(&rxf->mcast_active_q, addr) ||
		bna_mac_find(&rxf->mcast_pending_add_q, addr)) {
		if (cbfn)
			cbfn(rx->bna->bnad, rx);
		return BNA_CB_SUCCESS;
	}

	mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
	if (mac == NULL)
		return BNA_CB_MCAST_LIST_FULL;
	bfa_q_qe_init(&mac->qe);
	memcpy(mac->addr, addr, ETH_ALEN);
	list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;
}

enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
		void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head list_head;
	struct list_head *qe;
	u8 *mcaddr;
	struct bna_mac *mac;
	int i;

	/* Allocate nodes */
	INIT_LIST_HEAD(&list_head);
	for (i = 0, mcaddr = mclist; i < count; i++) {
		mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
		if (mac == NULL)
			goto err_return;
		bfa_q_qe_init(&mac->qe);
		memcpy(mac->addr, mcaddr, ETH_ALEN);
		list_add_tail(&mac->qe, &list_head);

		mcaddr += ETH_ALEN;
	}

	/* Purge the pending_add_q */
	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
	}

	/* Schedule active_q entries for deletion */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
	}

	/* Add the new entries */
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
	}

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;
	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;

err_return:
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
	}

	return BNA_CB_MCAST_LIST_FULL;
}

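/* vlan_id indexes a bit in the flat vlan_filter_table bitmap: the u32
 * word is vlan_id >> BFI_VLAN_WORD_SHIFT, the bit within it is
 * vlan_id & BFI_VLAN_WORD_MASK, and group_id selects the filter block
 * that must be re-sent to firmware. For example, assuming the usual
 * values BFI_VLAN_WORD_SHIFT == 5 and BFI_VLAN_BLOCK_SHIFT == 9,
 * vlan_id 100 maps to word 3, bit 4, block 0.
 */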
void
bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
	int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
	int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);

	rxf->vlan_filter_table[index] |= bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->vlan_pending_bitmask |= (1 << group_id);
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}

void
bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
	int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
	int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);

	rxf->vlan_filter_table[index] &= ~bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->vlan_pending_bitmask |= (1 << group_id);
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}

static int
bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;

	/* Delete MAC addresses previously added */
	if (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
		return 1;
	}

	/* Set default unicast MAC */
	if (rxf->ucast_pending_set) {
		rxf->ucast_pending_set = 0;
		memcpy(rxf->ucast_active_mac.addr,
			rxf->ucast_pending_mac->addr, ETH_ALEN);
		rxf->ucast_active_set = 1;
		bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
			BFI_ENET_H2I_MAC_UCAST_SET_REQ);
		return 1;
	}

	/* Add additional MAC entries */
	if (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		list_add_tail(&mac->qe, &rxf->ucast_active_q);
		bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
		return 1;
	}

	return 0;
}

static int
bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct list_head *qe;
	struct bna_mac *mac;

	/* Throw away delete pending ucast entries */
	while (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		if (cleanup == BNA_SOFT_CLEANUP)
			bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
		else {
			bna_bfi_ucast_req(rxf, mac,
				BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
			bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
			return 1;
		}
	}

	/* Move active ucast entries to pending_add_q */
	while (!list_empty(&rxf->ucast_active_q)) {
		bfa_q_deq(&rxf->ucast_active_q, &qe);
		bfa_q_qe_init(qe);
		list_add_tail(qe, &rxf->ucast_pending_add_q);
		if (cleanup == BNA_HARD_CLEANUP) {
			mac = (struct bna_mac *)qe;
			bna_bfi_ucast_req(rxf, mac,
				BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
			return 1;
		}
	}

	if (rxf->ucast_active_set) {
		rxf->ucast_pending_set = 1;
		rxf->ucast_active_set = 0;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
				BFI_ENET_H2I_MAC_UCAST_CLR_REQ);
			return 1;
		}
	}

	return 0;
}

static int
bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;

	/* Enable/disable promiscuous mode */
	if (is_promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_PROMISC;
		bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED);
		return 1;
	} else if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->promisc_rid = BFI_INVALID_RID;
		bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
		return 1;
	}

	return 0;
}

static int
bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct bna *bna = rxf->rx->bna;

	/* Clear pending promisc mode disable */
	if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->promisc_rid = BFI_INVALID_RID;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
			return 1;
		}
	}

	/* Move promisc mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
			return 1;
		}
	}

	return 0;
}

static int
bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf)
{
	/* Enable/disable allmulti mode */
	if (is_allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
		bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED);
		return 1;
	} else if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
		return 1;
	}

	return 0;
}

static int
bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	/* Clear pending allmulti mode disable */
	if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
			return 1;
		}
	}

	/* Move allmulti mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
			return 1;
		}
	}

	return 0;
}

static int
bna_rxf_promisc_enable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	if (is_promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask) ||
		(rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
		/* Do nothing if pending enable or already enabled */
	} else if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* Turn off pending disable command */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
	} else {
		/* Schedule enable */
		promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		bna->promisc_rid = rxf->rx->rid;
		ret = 1;
	}

	return ret;
}

static int
bna_rxf_promisc_disable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask) ||
		(!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) {
		/* Do nothing if pending disable or already disabled */
	} else if (is_promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* Turn off pending enable command */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		bna->promisc_rid = BFI_INVALID_RID;
	} else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		/* Schedule disable */
		promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}

static int
bna_rxf_allmulti_enable(struct bna_rxf *rxf)
{
	int ret = 0;

	if (is_allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask) ||
		(rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
		/* Do nothing if pending enable or already enabled */
	} else if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* Turn off pending disable command */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
	} else {
		/* Schedule enable */
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}

static int
bna_rxf_allmulti_disable(struct bna_rxf *rxf)
{
	int ret = 0;

	if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask) ||
		(!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) {
		/* Do nothing if pending disable or already disabled */
	} else if (is_allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* Turn off pending enable command */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
	} else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		/* Schedule disable */
		allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}

static int
bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
{
	if (rxf->vlan_strip_pending) {
		rxf->vlan_strip_pending = false;
		bna_bfi_vlan_strip_enable(rxf);
		return 1;
	}

	return 0;
}

/*
 * RX
 */

#define BNA_GET_RXQS(qcfg)	(((qcfg)->rxp_type == BNA_RXP_SINGLE) ?	\
	(qcfg)->num_paths : ((qcfg)->num_paths * 2))

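/* Number of pages needed to hold "size" bytes, rounding up */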
#define SIZE_TO_PAGES(size)	(((size) >> PAGE_SHIFT) + ((((size) &	\
	(PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))

#define call_rx_stop_cbfn(rx)						\
do {									\
	if ((rx)->stop_cbfn) {						\
		void (*cbfn)(void *, struct bna_rx *);			\
		void *cbarg;						\
		cbfn = (rx)->stop_cbfn;					\
		cbarg = (rx)->stop_cbarg;				\
		(rx)->stop_cbfn = NULL;					\
		(rx)->stop_cbarg = NULL;				\
		cbfn(cbarg, rx);					\
	}								\
} while (0)

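/* Fill a BFI datapath queue descriptor from a bna queue page table:
 * the page-table DMA address, the first page address, and the page
 * count/size (converted to big-endian for the firmware).
 */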
#define bfi_enet_datapath_q_init(bfi_q, bna_qpt)			\
do {									\
	struct bna_dma_addr cur_q_addr =				\
		*((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr));	\
	(bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb;	\
	(bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb;	\
	(bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb;		\
	(bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb;		\
	(bfi_q)->pages = htons((u16)(bna_qpt)->page_count);		\
	(bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);		\
} while (0)

static void bna_bfi_rx_enet_start(struct bna_rx *rx);
static void bna_rx_enet_stop(struct bna_rx *rx);
static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx);

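/* RX state machine: stopped -> start_wait -> rxf_start_wait -> started;
 * the stop path runs through rxf_stop_wait, stop_wait and cleanup_wait,
 * while failed/quiesce_wait handle restart after an IOC failure.
 */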
bfa_fsm_state_decl(bna_rx, stopped,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, start_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_start_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, started,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, cleanup_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, failed,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, quiesce_wait,
	struct bna_rx, enum bna_rx_event);

static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
{
	call_rx_stop_cbfn(rx);
}

static void bna_rx_sm_stopped(struct bna_rx *rx,
				enum bna_rx_event event)
{
	switch (event) {
	case RX_E_START:
		bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
		break;

	case RX_E_STOP:
		call_rx_stop_cbfn(rx);
		break;

	case RX_E_FAIL:
		/* no-op */
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
{
	bna_bfi_rx_enet_start(rx);
}

void
bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
	case RX_E_STOPPED:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	case RX_E_STARTED:
		bna_rx_enet_stop(rx);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void bna_rx_sm_start_wait(struct bna_rx *rx,
				enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
		break;

	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	case RX_E_STARTED:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
{
	rx->rx_post_cbfn(rx->bna->bnad, rx);
	bna_rxf_start(&rx->rxf);
}

void
bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		bna_rxf_fail(&rx->rxf);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	case RX_E_RXF_STARTED:
		bna_rxf_stop(&rx->rxf);
		break;

	case RX_E_RXF_STOPPED:
		bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
		bna_rx_enet_stop(rx);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

void
bna_rx_sm_started_entry(struct bna_rx *rx)
{
	struct bna_rxp *rxp;
	struct list_head *qe_rxp;
	int is_regular = (rx->type == BNA_RX_T_REGULAR);

	/* Start IB */
	list_for_each(qe_rxp, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe_rxp;
		bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
	}

	bna_ethport_cb_rx_started(&rx->bna->ethport);
}

static void
bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
		bna_ethport_cb_rx_stopped(&rx->bna->ethport);
		bna_rxf_stop(&rx->rxf);
		break;

	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_failed);
		bna_ethport_cb_rx_stopped(&rx->bna->ethport);
		bna_rxf_fail(&rx->rxf);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
				enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
		break;

	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_failed);
		bna_rxf_fail(&rx->rxf);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	case RX_E_RXF_STARTED:
		bfa_fsm_set_state(rx, bna_rx_sm_started);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

void
bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
{
}

void
bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
	case RX_E_RXF_STOPPED:
		/* No-op */
		break;

	case RX_E_CLEANUP_DONE:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void
bna_rx_sm_failed_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_START:
		bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait);
		break;

	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		break;

	case RX_E_FAIL:
	case RX_E_RXF_STARTED:
	case RX_E_RXF_STOPPED:
		/* No-op */
		break;

	case RX_E_CLEANUP_DONE:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void
bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		break;

	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_failed);
		break;

	case RX_E_CLEANUP_DONE:
		bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

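/* Build and post BFI_ENET_H2I_RX_CFG_SET_REQ: one queue-set entry per
 * RX path (small/large data queues plus the CQ and its IB), followed
 * by the per-Rx interrupt, HDS and VLAN-strip configuration.
 */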
1635 | static void | |
1636 | bna_bfi_rx_enet_start(struct bna_rx *rx) | |
1637 | { | |
1638 | struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req; | |
1639 | struct bna_rxp *rxp = NULL; | |
1640 | struct bna_rxq *q0 = NULL, *q1 = NULL; | |
1641 | struct list_head *rxp_qe; | |
1642 | int i; | |
1643 | ||
1644 | bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET, | |
1645 | BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid); | |
1646 | cfg_req->mh.num_entries = htons( | |
1647 | bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req))); | |
1648 | ||
1649 | cfg_req->num_queue_sets = rx->num_paths; | |
1650 | for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q); | |
1651 | i < rx->num_paths; | |
1652 | i++, rxp_qe = bfa_q_next(rxp_qe)) { | |
1653 | rxp = (struct bna_rxp *)rxp_qe; | |
1654 | ||
1655 | GET_RXQS(rxp, q0, q1); | |
1656 | switch (rxp->type) { | |
1657 | case BNA_RXP_SLR: | |
1658 | case BNA_RXP_HDS: | |
1659 | /* Small RxQ */ | |
1660 | bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q, | |
1661 | &q1->qpt); | |
1662 | cfg_req->q_cfg[i].qs.rx_buffer_size = | |
1663 | htons((u16)q1->buffer_size); | |
1664 | /* Fall through */ | |
1665 | ||
1666 | case BNA_RXP_SINGLE: | |
1667 | /* Large/Single RxQ */ | |
1668 | bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q, | |
1669 | &q0->qpt); | |
1670 | q0->buffer_size = | |
1671 | bna_enet_mtu_get(&rx->bna->enet); | |
1672 | cfg_req->q_cfg[i].ql.rx_buffer_size = | |
1673 | htons((u16)q0->buffer_size); | |
1674 | break; | |
1675 | ||
1676 | default: | |
1677 | BUG_ON(1); | |
1678 | } | |
1679 | ||
1680 | bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q, | |
1681 | &rxp->cq.qpt); | |
1682 | ||
1683 | cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo = | |
1684 | rxp->cq.ib.ib_seg_host_addr.lsb; | |
1685 | cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi = | |
1686 | rxp->cq.ib.ib_seg_host_addr.msb; | |
1687 | cfg_req->q_cfg[i].ib.intr.msix_index = | |
1688 | htons((u16)rxp->cq.ib.intr_vector); | |
1689 | } | |
1690 | ||
1691 | cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED; | |
1692 | cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED; | |
1693 | cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED; | |
1694 | cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED; | |
1695 | cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX) | |
1696 | ? BNA_STATUS_T_ENABLED : | |
1697 | BNA_STATUS_T_DISABLED; | |
1698 | cfg_req->ib_cfg.coalescing_timeout = | |
1699 | htonl((u32)rxp->cq.ib.coalescing_timeo); | |
1700 | cfg_req->ib_cfg.inter_pkt_timeout = | |
1701 | htonl((u32)rxp->cq.ib.interpkt_timeo); | |
1702 | cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count; | |
1703 | ||
1704 | switch (rxp->type) { | |
1705 | case BNA_RXP_SLR: | |
1706 | cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL; | |
1707 | break; | |
1708 | ||
1709 | case BNA_RXP_HDS: | |
1710 | cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS; | |
1711 | cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type; | |
1712 | cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset; | |
1713 | cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset; | |
1714 | break; | |
1715 | ||
1716 | case BNA_RXP_SINGLE: | |
1717 | cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE; | |
1718 | break; | |
1719 | ||
1720 | default: | |
1721 | BUG_ON(1); | |
1722 | } | |
1723 | cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status; | |
1724 | ||
1725 | bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, | |
1726 | sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh); | |
1727 | bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd); | |
1728 | } | |
1729 | ||
1730 | static void | |
1731 | bna_bfi_rx_enet_stop(struct bna_rx *rx) | |
1732 | { | |
1733 | struct bfi_enet_req *req = &rx->bfi_enet_cmd.req; | |
1734 | ||
1735 | bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, | |
1736 | BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid); | |
1737 | req->mh.num_entries = htons( | |
1738 | bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req))); | |
1739 | bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req), | |
1740 | &req->mh); | |
1741 | bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd); | |
1742 | } | |
1743 | ||
1744 | static void | |
1745 | bna_rx_enet_stop(struct bna_rx *rx) | |
1746 | { | |
1747 | struct bna_rxp *rxp; | |
1748 | struct list_head *qe_rxp; | |
1749 | ||
1750 | /* Stop IB */ | |
1751 | list_for_each(qe_rxp, &rx->rxp_q) { | |
1752 | rxp = (struct bna_rxp *)qe_rxp; | |
1753 | bna_ib_stop(rx->bna, &rxp->cq.ib); | |
1754 | } | |
1755 | ||
1756 | bna_bfi_rx_enet_stop(rx); | |
1757 | } | |
1758 | ||
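| /* Return 1 if enough free RX, RX-path and RXQ objects exist for | |
| * the requested config; SINGLE paths need one RXQ each, SLR/HDS | |
| * paths need two. Return 0 otherwise. | |
| */ | |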
1759 | static int | |
1760 | bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg) | |
1761 | { | |
1762 | if ((rx_mod->rx_free_count == 0) || | |
1763 | (rx_mod->rxp_free_count == 0) || | |
1764 | (rx_mod->rxq_free_count == 0)) | |
1765 | return 0; | |
1766 | ||
1767 | if (rx_cfg->rxp_type == BNA_RXP_SINGLE) { | |
1768 | if ((rx_mod->rxp_free_count < rx_cfg->num_paths) || | |
1769 | (rx_mod->rxq_free_count < rx_cfg->num_paths)) | |
1770 | return 0; | |
1771 | } else { | |
1772 | if ((rx_mod->rxp_free_count < rx_cfg->num_paths) || | |
1773 | (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths))) | |
1774 | return 0; | |
1775 | } | |
1776 | ||
1777 | return 1; | |
1778 | } | |
1779 | ||
1780 | static struct bna_rxq * | |
1781 | bna_rxq_get(struct bna_rx_mod *rx_mod) | |
1782 | { | |
1783 | struct bna_rxq *rxq = NULL; | |
1784 | struct list_head *qe = NULL; | |
1785 | ||
1786 | bfa_q_deq(&rx_mod->rxq_free_q, &qe); | |
1787 | rx_mod->rxq_free_count--; | |
1788 | rxq = (struct bna_rxq *)qe; | |
1789 | bfa_q_qe_init(&rxq->qe); | |
1790 | ||
1791 | return rxq; | |
1792 | } | |
1793 | ||
1794 | static void | |
1795 | bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq) | |
1796 | { | |
1797 | bfa_q_qe_init(&rxq->qe); | |
1798 | list_add_tail(&rxq->qe, &rx_mod->rxq_free_q); | |
1799 | rx_mod->rxq_free_count++; | |
1800 | } | |
1801 | ||
1802 | static struct bna_rxp * | |
1803 | bna_rxp_get(struct bna_rx_mod *rx_mod) | |
1804 | { | |
1805 | struct list_head *qe = NULL; | |
1806 | struct bna_rxp *rxp = NULL; | |
1807 | ||
1808 | bfa_q_deq(&rx_mod->rxp_free_q, &qe); | |
1809 | rx_mod->rxp_free_count--; | |
1810 | rxp = (struct bna_rxp *)qe; | |
1811 | bfa_q_qe_init(&rxp->qe); | |
1812 | ||
1813 | return rxp; | |
1814 | } | |
1815 | ||
1816 | static void | |
1817 | bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp) | |
1818 | { | |
1819 | bfa_q_qe_init(&rxp->qe); | |
1820 | list_add_tail(&rxp->qe, &rx_mod->rxp_free_q); | |
1821 | rx_mod->rxp_free_count++; | |
1822 | } | |
1823 | ||
1824 | static struct bna_rx * | |
1825 | bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type) | |
1826 | { | |
1827 | struct list_head *qe = NULL; | |
1828 | struct bna_rx *rx = NULL; | |
1829 | ||
1830 | if (type == BNA_RX_T_REGULAR) | |
1831 | bfa_q_deq(&rx_mod->rx_free_q, &qe); | |
1832 | else | |
1833 | bfa_q_deq_tail(&rx_mod->rx_free_q, &qe); | |
1834 | ||
1835 | rx_mod->rx_free_count--; | |
1836 | rx = (struct bna_rx *)qe; | |
1837 | bfa_q_qe_init(&rx->qe); | |
1838 | list_add_tail(&rx->qe, &rx_mod->rx_active_q); | |
1839 | rx->type = type; | |
1840 | ||
1841 | return rx; | |
1842 | } | |
1843 | ||
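| /* Return an RX object to the free list, keeping the list sorted | |
| * in ascending rid order. | |
| */ | |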
1844 | static void | |
1845 | bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx) | |
1846 | { | |
1847 | struct list_head *prev_qe = NULL; | |
1848 | struct list_head *qe; | |
1849 | ||
1850 | bfa_q_qe_init(&rx->qe); | |
1851 | ||
1852 | list_for_each(qe, &rx_mod->rx_free_q) { | |
1853 | if (((struct bna_rx *)qe)->rid < rx->rid) | |
1854 | prev_qe = qe; | |
1855 | else | |
1856 | break; | |
1857 | } | |
1858 | ||
1859 | if (prev_qe == NULL) { | |
1860 | /* This is the first entry */ | |
1861 | bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe); | |
1862 | } else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) { | |
1863 | /* This is the last entry */ | |
1864 | list_add_tail(&rx->qe, &rx_mod->rx_free_q); | |
1865 | } else { | |
1866 | /* Somewhere in the middle */ | |
1867 | bfa_q_next(&rx->qe) = bfa_q_next(prev_qe); | |
1868 | bfa_q_prev(&rx->qe) = prev_qe; | |
1869 | bfa_q_next(prev_qe) = &rx->qe; | |
1870 | bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe; | |
1871 | } | |
1872 | ||
1873 | rx_mod->rx_free_count++; | |
1874 | } | |
1875 | ||
1876 | static void | |
1877 | bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0, | |
1878 | struct bna_rxq *q1) | |
1879 | { | |
1880 | switch (rxp->type) { | |
1881 | case BNA_RXP_SINGLE: | |
1882 | rxp->rxq.single.only = q0; | |
1883 | rxp->rxq.single.reserved = NULL; | |
1884 | break; | |
1885 | case BNA_RXP_SLR: | |
1886 | rxp->rxq.slr.large = q0; | |
1887 | rxp->rxq.slr.small = q1; | |
1888 | break; | |
1889 | case BNA_RXP_HDS: | |
1890 | rxp->rxq.hds.data = q0; | |
1891 | rxp->rxq.hds.hdr = q1; | |
1892 | break; | |
1893 | default: | |
1894 | break; | |
1895 | } | |
1896 | } | |
1897 | ||
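| /* Fill the hardware queue page table with the DMA address of each | |
| * queue page, and mirror the kernel virtual addresses in the RCB's | |
| * software QPT. | |
| */ | |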
1898 | static void | |
1899 | bna_rxq_qpt_setup(struct bna_rxq *rxq, | |
1900 | struct bna_rxp *rxp, | |
1901 | u32 page_count, | |
1902 | u32 page_size, | |
1903 | struct bna_mem_descr *qpt_mem, | |
1904 | struct bna_mem_descr *swqpt_mem, | |
1905 | struct bna_mem_descr *page_mem) | |
1906 | { | |
1907 | int i; | |
1908 | ||
1909 | rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; | |
1910 | rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb; | |
1911 | rxq->qpt.kv_qpt_ptr = qpt_mem->kva; | |
1912 | rxq->qpt.page_count = page_count; | |
1913 | rxq->qpt.page_size = page_size; | |
1914 | ||
1915 | rxq->rcb->sw_qpt = (void **) swqpt_mem->kva; | |
1916 | ||
1917 | for (i = 0; i < rxq->qpt.page_count; i++) { | |
1918 | rxq->rcb->sw_qpt[i] = page_mem[i].kva; | |
1919 | ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb = | |
1920 | page_mem[i].dma.lsb; | |
1921 | ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb = | |
1922 | page_mem[i].dma.msb; | |
1923 | } | |
1924 | } | |
1925 | ||
1926 | static void | |
1927 | bna_rxp_cqpt_setup(struct bna_rxp *rxp, | |
1928 | u32 page_count, | |
1929 | u32 page_size, | |
1930 | struct bna_mem_descr *qpt_mem, | |
1931 | struct bna_mem_descr *swqpt_mem, | |
1932 | struct bna_mem_descr *page_mem) | |
1933 | { | |
1934 | int i; | |
1935 | ||
1936 | rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; | |
1937 | rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb; | |
1938 | rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva; | |
1939 | rxp->cq.qpt.page_count = page_count; | |
1940 | rxp->cq.qpt.page_size = page_size; | |
1941 | ||
1942 | rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva; | |
1943 | ||
1944 | for (i = 0; i < rxp->cq.qpt.page_count; i++) { | |
1945 | rxp->cq.ccb->sw_qpt[i] = page_mem[i].kva; | |
1946 | ||
1947 | ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb = | |
1948 | page_mem[i].dma.lsb; | |
1949 | ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb = | |
1950 | page_mem[i].dma.msb; | |
1951 | } | |
1952 | } | |
1953 | ||
1954 | static void | |
1955 | bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx) | |
1956 | { | |
1957 | struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg; | |
1958 | ||
1959 | bfa_wc_down(&rx_mod->rx_stop_wc); | |
1960 | } | |
1961 | ||
1962 | static void | |
1963 | bna_rx_mod_cb_rx_stopped_all(void *arg) | |
1964 | { | |
1965 | struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg; | |
1966 | ||
1967 | if (rx_mod->stop_cbfn) | |
1968 | rx_mod->stop_cbfn(&rx_mod->bna->enet); | |
1969 | rx_mod->stop_cbfn = NULL; | |
1970 | } | |
1971 | ||
1972 | static void | |
1973 | bna_rx_start(struct bna_rx *rx) | |
1974 | { | |
1975 | rx->rx_flags |= BNA_RX_F_ENET_STARTED; | |
1976 | if (rx->rx_flags & BNA_RX_F_ENABLED) | |
1977 | bfa_fsm_send_event(rx, RX_E_START); | |
1978 | } | |
1979 | ||
1980 | static void | |
1981 | bna_rx_stop(struct bna_rx *rx) | |
1982 | { | |
1983 | rx->rx_flags &= ~BNA_RX_F_ENET_STARTED; | |
1984 | if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped) | |
1985 | bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx); | |
1986 | else { | |
1987 | rx->stop_cbfn = bna_rx_mod_cb_rx_stopped; | |
1988 | rx->stop_cbarg = &rx->bna->rx_mod; | |
1989 | bfa_fsm_send_event(rx, RX_E_STOP); | |
1990 | } | |
1991 | } | |
1992 | ||
1993 | static void | |
1994 | bna_rx_fail(struct bna_rx *rx) | |
1995 | { | |
1996 | /* Indicate Enet is no longer started, and fail the RX */ | |
1997 | rx->rx_flags &= ~BNA_RX_F_ENET_STARTED; | |
1998 | bfa_fsm_send_event(rx, RX_E_FAIL); | |
1999 | } | |
2000 | ||
2001 | void | |
2002 | bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type) | |
2003 | { | |
2004 | struct bna_rx *rx; | |
2005 | struct list_head *qe; | |
2006 | ||
2007 | rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED; | |
2008 | if (type == BNA_RX_T_LOOPBACK) | |
2009 | rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK; | |
2010 | ||
2011 | list_for_each(qe, &rx_mod->rx_active_q) { | |
2012 | rx = (struct bna_rx *)qe; | |
2013 | if (rx->type == type) | |
2014 | bna_rx_start(rx); | |
2015 | } | |
2016 | } | |
2017 | ||
2018 | void | |
2019 | bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type) | |
2020 | { | |
2021 | struct bna_rx *rx; | |
2022 | struct list_head *qe; | |
2023 | ||
2024 | rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED; | |
2025 | rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK; | |
2026 | ||
2027 | rx_mod->stop_cbfn = bna_enet_cb_rx_stopped; | |
2028 | ||
2029 | bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod); | |
2030 | ||
2031 | list_for_each(qe, &rx_mod->rx_active_q) { | |
2032 | rx = (struct bna_rx *)qe; | |
2033 | if (rx->type == type) { | |
2034 | bfa_wc_up(&rx_mod->rx_stop_wc); | |
2035 | bna_rx_stop(rx); | |
2036 | } | |
2037 | } | |
2038 | ||
2039 | bfa_wc_wait(&rx_mod->rx_stop_wc); | |
2040 | } | |
2041 | ||
2042 | void | |
2043 | bna_rx_mod_fail(struct bna_rx_mod *rx_mod) | |
2044 | { | |
2045 | struct bna_rx *rx; | |
2046 | struct list_head *qe; | |
2047 | ||
2048 | rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED; | |
2049 | rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK; | |
2050 | ||
2051 | list_for_each(qe, &rx_mod->rx_active_q) { | |
2052 | rx = (struct bna_rx *)qe; | |
2053 | bna_rx_fail(rx); | |
2054 | } | |
2055 | } | |
2056 | ||
2057 | void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna, | |
2058 | struct bna_res_info *res_info) | |
2059 | { | |
2060 | int index; | |
2061 | struct bna_rx *rx_ptr; | |
2062 | struct bna_rxp *rxp_ptr; | |
2063 | struct bna_rxq *rxq_ptr; | |
2064 | ||
2065 | rx_mod->bna = bna; | |
2066 | rx_mod->flags = 0; | |
2067 | ||
2068 | rx_mod->rx = (struct bna_rx *) | |
2069 | res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva; | |
2070 | rx_mod->rxp = (struct bna_rxp *) | |
2071 | res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva; | |
2072 | rx_mod->rxq = (struct bna_rxq *) | |
2073 | res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva; | |
2074 | ||
2075 | /* Initialize the queues */ | |
2076 | INIT_LIST_HEAD(&rx_mod->rx_free_q); | |
2077 | rx_mod->rx_free_count = 0; | |
2078 | INIT_LIST_HEAD(&rx_mod->rxq_free_q); | |
2079 | rx_mod->rxq_free_count = 0; | |
2080 | INIT_LIST_HEAD(&rx_mod->rxp_free_q); | |
2081 | rx_mod->rxp_free_count = 0; | |
2082 | INIT_LIST_HEAD(&rx_mod->rx_active_q); | |
2083 | ||
2084 | /* Build RX queues */ | |
2085 | for (index = 0; index < bna->ioceth.attr.num_rxp; index++) { | |
2086 | rx_ptr = &rx_mod->rx[index]; | |
2087 | ||
2088 | bfa_q_qe_init(&rx_ptr->qe); | |
2089 | INIT_LIST_HEAD(&rx_ptr->rxp_q); | |
2090 | rx_ptr->bna = NULL; | |
2091 | rx_ptr->rid = index; | |
2092 | rx_ptr->stop_cbfn = NULL; | |
2093 | rx_ptr->stop_cbarg = NULL; | |
2094 | ||
2095 | list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q); | |
2096 | rx_mod->rx_free_count++; | |
2097 | } | |
2098 | ||
2099 | /* build RX-path queue */ | |
2100 | for (index = 0; index < bna->ioceth.attr.num_rxp; index++) { | |
2101 | rxp_ptr = &rx_mod->rxp[index]; | |
2102 | bfa_q_qe_init(&rxp_ptr->qe); | |
2103 | list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q); | |
2104 | rx_mod->rxp_free_count++; | |
2105 | } | |
2106 | ||
2107 | /* build RXQ queue */ | |
2108 | for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) { | |
2109 | rxq_ptr = &rx_mod->rxq[index]; | |
2110 | bfa_q_qe_init(&rxq_ptr->qe); | |
2111 | list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q); | |
2112 | rx_mod->rxq_free_count++; | |
2113 | } | |
2114 | } | |
2115 | ||
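| /* Note: the free-list walks below only count entries; the counts | |
| * are not used for anything. | |
| */ | |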
2116 | void | |
2117 | bna_rx_mod_uninit(struct bna_rx_mod *rx_mod) | |
2118 | { | |
2119 | struct list_head *qe; | |
2120 | int i; | |
2121 | ||
2122 | i = 0; | |
2123 | list_for_each(qe, &rx_mod->rx_free_q) | |
2124 | i++; | |
2125 | ||
2126 | i = 0; | |
2127 | list_for_each(qe, &rx_mod->rxp_free_q) | |
2128 | i++; | |
2129 | ||
2130 | i = 0; | |
2131 | list_for_each(qe, &rx_mod->rxq_free_q) | |
2132 | i++; | |
2133 | ||
2134 | rx_mod->bna = NULL; | |
2135 | } | |
2136 | ||
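| /* Handle the firmware response to RX config set: record hardware | |
| * queue ids, program the CQ/RxQ doorbell addresses and zero the | |
| * producer/consumer indexes before declaring the RX started. | |
| */ | |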
2137 | void | |
2138 | bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr) | |
2139 | { | |
2140 | struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp; | |
2141 | struct bna_rxp *rxp = NULL; | |
2142 | struct bna_rxq *q0 = NULL, *q1 = NULL; | |
2143 | struct list_head *rxp_qe; | |
2144 | int i; | |
2145 | ||
2146 | bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp, | |
2147 | sizeof(struct bfi_enet_rx_cfg_rsp)); | |
2148 | ||
2149 | rx->hw_id = cfg_rsp->hw_id; | |
2150 | ||
2151 | for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q); | |
2152 | i < rx->num_paths; | |
2153 | i++, rxp_qe = bfa_q_next(rxp_qe)) { | |
2154 | rxp = (struct bna_rxp *)rxp_qe; | |
2155 | GET_RXQS(rxp, q0, q1); | |
2156 | ||
2157 | /* Setup doorbells */ | |
2158 | rxp->cq.ccb->i_dbell->doorbell_addr = | |
2159 | rx->bna->pcidev.pci_bar_kva | |
2160 | + ntohl(cfg_rsp->q_handles[i].i_dbell); | |
2161 | rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid; | |
2162 | q0->rcb->q_dbell = | |
2163 | rx->bna->pcidev.pci_bar_kva | |
2164 | + ntohl(cfg_rsp->q_handles[i].ql_dbell); | |
2165 | q0->hw_id = cfg_rsp->q_handles[i].hw_lqid; | |
2166 | if (q1) { | |
2167 | q1->rcb->q_dbell = | |
2168 | rx->bna->pcidev.pci_bar_kva | |
2169 | + ntohl(cfg_rsp->q_handles[i].qs_dbell); | |
2170 | q1->hw_id = cfg_rsp->q_handles[i].hw_sqid; | |
2171 | } | |
2172 | ||
2173 | /* Initialize producer/consumer indexes */ | |
2174 | (*rxp->cq.ccb->hw_producer_index) = 0; | |
2175 | rxp->cq.ccb->producer_index = 0; | |
2176 | q0->rcb->producer_index = q0->rcb->consumer_index = 0; | |
2177 | if (q1) | |
2178 | q1->rcb->producer_index = q1->rcb->consumer_index = 0; | |
2179 | } | |
2180 | ||
2181 | bfa_fsm_send_event(rx, RX_E_STARTED); | |
2182 | } | |
2183 | ||
2184 | void | |
2185 | bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr) | |
2186 | { | |
2187 | bfa_fsm_send_event(rx, RX_E_STOPPED); | |
2188 | } | |
2189 | ||
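| /* Describe the memory and interrupt resources an RX needs: CCB/RCB | |
| * control blocks, hardware and software page tables plus pages for | |
| * the CQ and data/header RxQs, IB index segments, the RIT, and one | |
| * MSI-X vector per path. | |
| */ | |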
2190 | void | |
2191 | bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info) | |
2192 | { | |
2193 | u32 cq_size, hq_size, dq_size; | |
2194 | u32 cpage_count, hpage_count, dpage_count; | |
2195 | struct bna_mem_info *mem_info; | |
2196 | u32 cq_depth; | |
2197 | u32 hq_depth; | |
2198 | u32 dq_depth; | |
2199 | ||
2200 | dq_depth = q_cfg->q_depth; | |
2201 | hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth); | |
2202 | cq_depth = dq_depth + hq_depth; | |
2203 | ||
2204 | BNA_TO_POWER_OF_2_HIGH(cq_depth); | |
2205 | cq_size = cq_depth * BFI_CQ_WI_SIZE; | |
2206 | cq_size = ALIGN(cq_size, PAGE_SIZE); | |
2207 | cpage_count = SIZE_TO_PAGES(cq_size); | |
2208 | ||
2209 | BNA_TO_POWER_OF_2_HIGH(dq_depth); | |
2210 | dq_size = dq_depth * BFI_RXQ_WI_SIZE; | |
2211 | dq_size = ALIGN(dq_size, PAGE_SIZE); | |
2212 | dpage_count = SIZE_TO_PAGES(dq_size); | |
2213 | ||
2214 | if (BNA_RXP_SINGLE != q_cfg->rxp_type) { | |
2215 | BNA_TO_POWER_OF_2_HIGH(hq_depth); | |
2216 | hq_size = hq_depth * BFI_RXQ_WI_SIZE; | |
2217 | hq_size = ALIGN(hq_size, PAGE_SIZE); | |
2218 | hpage_count = SIZE_TO_PAGES(hq_size); | |
2219 | } else | |
2220 | hpage_count = 0; | |
2221 | ||
2222 | res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM; | |
2223 | mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info; | |
2224 | mem_info->mem_type = BNA_MEM_T_KVA; | |
2225 | mem_info->len = sizeof(struct bna_ccb); | |
2226 | mem_info->num = q_cfg->num_paths; | |
2227 | ||
2228 | res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM; | |
2229 | mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info; | |
2230 | mem_info->mem_type = BNA_MEM_T_KVA; | |
2231 | mem_info->len = sizeof(struct bna_rcb); | |
2232 | mem_info->num = BNA_GET_RXQS(q_cfg); | |
2233 | ||
2234 | res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM; | |
2235 | mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info; | |
2236 | mem_info->mem_type = BNA_MEM_T_DMA; | |
2237 | mem_info->len = cpage_count * sizeof(struct bna_dma_addr); | |
2238 | mem_info->num = q_cfg->num_paths; | |
2239 | ||
2240 | res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM; | |
2241 | mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info; | |
2242 | mem_info->mem_type = BNA_MEM_T_KVA; | |
2243 | mem_info->len = cpage_count * sizeof(void *); | |
2244 | mem_info->num = q_cfg->num_paths; | |
2245 | ||
2246 | res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM; | |
2247 | mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info; | |
2248 | mem_info->mem_type = BNA_MEM_T_DMA; | |
2249 | mem_info->len = PAGE_SIZE; | |
2250 | mem_info->num = cpage_count * q_cfg->num_paths; | |
2251 | ||
2252 | res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM; | |
2253 | mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info; | |
2254 | mem_info->mem_type = BNA_MEM_T_DMA; | |
2255 | mem_info->len = dpage_count * sizeof(struct bna_dma_addr); | |
2256 | mem_info->num = q_cfg->num_paths; | |
2257 | ||
2258 | res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM; | |
2259 | mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info; | |
2260 | mem_info->mem_type = BNA_MEM_T_KVA; | |
2261 | mem_info->len = dpage_count * sizeof(void *); | |
2262 | mem_info->num = q_cfg->num_paths; | |
2263 | ||
2264 | res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM; | |
2265 | mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info; | |
2266 | mem_info->mem_type = BNA_MEM_T_DMA; | |
2267 | mem_info->len = PAGE_SIZE; | |
2268 | mem_info->num = dpage_count * q_cfg->num_paths; | |
2269 | ||
2270 | res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM; | |
2271 | mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info; | |
2272 | mem_info->mem_type = BNA_MEM_T_DMA; | |
2273 | mem_info->len = hpage_count * sizeof(struct bna_dma_addr); | |
2274 | mem_info->num = (hpage_count ? q_cfg->num_paths : 0); | |
2275 | ||
2276 | res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM; | |
2277 | mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info; | |
2278 | mem_info->mem_type = BNA_MEM_T_KVA; | |
2279 | mem_info->len = hpage_count * sizeof(void *); | |
2280 | mem_info->num = (hpage_count ? q_cfg->num_paths : 0); | |
2281 | ||
2282 | res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM; | |
2283 | mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info; | |
2284 | mem_info->mem_type = BNA_MEM_T_DMA; | |
2285 | mem_info->len = (hpage_count ? PAGE_SIZE : 0); | |
2286 | mem_info->num = (hpage_count ? (hpage_count * q_cfg->num_paths) : 0); | |
2287 | ||
2288 | res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM; | |
2289 | mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info; | |
2290 | mem_info->mem_type = BNA_MEM_T_DMA; | |
2291 | mem_info->len = BFI_IBIDX_SIZE; | |
2292 | mem_info->num = q_cfg->num_paths; | |
2293 | ||
2294 | res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM; | |
2295 | mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info; | |
2296 | mem_info->mem_type = BNA_MEM_T_KVA; | |
2297 | mem_info->len = BFI_ENET_RSS_RIT_MAX; | |
2298 | mem_info->num = 1; | |
2299 | ||
2300 | res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR; | |
2301 | res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX; | |
2302 | res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths; | |
2303 | } | |
2304 | ||
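| /* Carve an RX and its paths/queues out of the module free lists, | |
| * wire up the driver callbacks, set up IBs, RCBs and CCBs, and | |
| * leave the new object in the stopped state. | |
| */ | |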
2305 | struct bna_rx * | |
2306 | bna_rx_create(struct bna *bna, struct bnad *bnad, | |
2307 | struct bna_rx_config *rx_cfg, | |
d91d25d5 | 2308 | const struct bna_rx_event_cbfn *rx_cbfn, |
f3bd5173 RM | 2309 | struct bna_res_info *res_info, |
2310 | void *priv) | |
2311 | { | |
2312 | struct bna_rx_mod *rx_mod = &bna->rx_mod; | |
2313 | struct bna_rx *rx; | |
2314 | struct bna_rxp *rxp; | |
2315 | struct bna_rxq *q0; | |
2316 | struct bna_rxq *q1; | |
2317 | struct bna_intr_info *intr_info; | |
2318 | u32 page_count; | |
2319 | struct bna_mem_descr *ccb_mem; | |
2320 | struct bna_mem_descr *rcb_mem; | |
2321 | struct bna_mem_descr *unmapq_mem; | |
2322 | struct bna_mem_descr *cqpt_mem; | |
2323 | struct bna_mem_descr *cswqpt_mem; | |
2324 | struct bna_mem_descr *cpage_mem; | |
2325 | struct bna_mem_descr *hqpt_mem; | |
2326 | struct bna_mem_descr *dqpt_mem; | |
2327 | struct bna_mem_descr *hsqpt_mem; | |
2328 | struct bna_mem_descr *dsqpt_mem; | |
2329 | struct bna_mem_descr *hpage_mem; | |
2330 | struct bna_mem_descr *dpage_mem; | |
2331 | int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0; | |
2332 | int dpage_count, hpage_count, rcb_idx; | |
2333 | ||
2334 | if (!bna_rx_res_check(rx_mod, rx_cfg)) | |
2335 | return NULL; | |
2336 | ||
2337 | intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info; | |
2338 | ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0]; | |
2339 | rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0]; | |
2340 | unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0]; | |
2341 | cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0]; | |
2342 | cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0]; | |
2343 | cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0]; | |
2344 | hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0]; | |
2345 | dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0]; | |
2346 | hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0]; | |
2347 | dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0]; | |
2348 | hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0]; | |
2349 | dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0]; | |
2350 | ||
2351 | page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.num / | |
2352 | rx_cfg->num_paths; | |
2353 | ||
2354 | dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.num / | |
2355 | rx_cfg->num_paths; | |
2356 | ||
2357 | hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.num / | |
2358 | rx_cfg->num_paths; | |
2359 | ||
2360 | rx = bna_rx_get(rx_mod, rx_cfg->rx_type); | |
2361 | rx->bna = bna; | |
2362 | rx->rx_flags = 0; | |
2363 | INIT_LIST_HEAD(&rx->rxp_q); | |
2364 | rx->stop_cbfn = NULL; | |
2365 | rx->stop_cbarg = NULL; | |
2366 | rx->priv = priv; | |
2367 | ||
2368 | rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn; | |
2369 | rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn; | |
2370 | rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn; | |
2371 | rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn; | |
2372 | /* Following callbacks are mandatory */ | |
2373 | rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn; | |
2374 | rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn; | |
2375 | ||
2376 | if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) { | |
2377 | switch (rx->type) { | |
2378 | case BNA_RX_T_REGULAR: | |
2379 | if (!(rx->bna->rx_mod.flags & | |
2380 | BNA_RX_MOD_F_ENET_LOOPBACK)) | |
2381 | rx->rx_flags |= BNA_RX_F_ENET_STARTED; | |
2382 | break; | |
2383 | case BNA_RX_T_LOOPBACK: | |
2384 | if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK) | |
2385 | rx->rx_flags |= BNA_RX_F_ENET_STARTED; | |
2386 | break; | |
2387 | } | |
2388 | } | |
2389 | ||
2390 | rx->num_paths = rx_cfg->num_paths; | |
2391 | for (i = 0, rcb_idx = 0; i < rx->num_paths; i++) { | |
2392 | rxp = bna_rxp_get(rx_mod); | |
2393 | list_add_tail(&rxp->qe, &rx->rxp_q); | |
2394 | rxp->type = rx_cfg->rxp_type; | |
2395 | rxp->rx = rx; | |
2396 | rxp->cq.rx = rx; | |
2397 | ||
2398 | q0 = bna_rxq_get(rx_mod); | |
2399 | if (BNA_RXP_SINGLE == rx_cfg->rxp_type) | |
2400 | q1 = NULL; | |
2401 | else | |
2402 | q1 = bna_rxq_get(rx_mod); | |
2403 | ||
2404 | if (1 == intr_info->num) | |
2405 | rxp->vector = intr_info->idl[0].vector; | |
2406 | else | |
2407 | rxp->vector = intr_info->idl[i].vector; | |
2408 | ||
2409 | /* Setup IB */ | |
2410 | ||
2411 | rxp->cq.ib.ib_seg_host_addr.lsb = | |
2412 | res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb; | |
2413 | rxp->cq.ib.ib_seg_host_addr.msb = | |
2414 | res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb; | |
2415 | rxp->cq.ib.ib_seg_host_addr_kva = | |
2416 | res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva; | |
2417 | rxp->cq.ib.intr_type = intr_info->intr_type; | |
2418 | if (intr_info->intr_type == BNA_INTR_T_MSIX) | |
2419 | rxp->cq.ib.intr_vector = rxp->vector; | |
2420 | else | |
2421 | rxp->cq.ib.intr_vector = (1 << rxp->vector); | |
2422 | rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo; | |
2423 | rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT; | |
2424 | rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO; | |
2425 | ||
2426 | bna_rxp_add_rxqs(rxp, q0, q1); | |
2427 | ||
2428 | /* Setup large Q */ | |
2429 | ||
2430 | q0->rx = rx; | |
2431 | q0->rxp = rxp; | |
2432 | ||
2433 | q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva; | |
2434 | q0->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva; | |
2435 | rcb_idx++; | |
2436 | q0->rcb->q_depth = rx_cfg->q_depth; | |
2437 | q0->rcb->rxq = q0; | |
2438 | q0->rcb->bnad = bna->bnad; | |
2439 | q0->rcb->id = 0; | |
2440 | q0->rx_packets = q0->rx_bytes = 0; | |
2441 | q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0; | |
2442 | ||
2443 | bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE, | |
2444 | &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[dpage_idx]); | |
2445 | q0->rcb->page_idx = dpage_idx; | |
2446 | q0->rcb->page_count = dpage_count; | |
2447 | dpage_idx += dpage_count; | |
2448 | ||
2449 | if (rx->rcb_setup_cbfn) | |
2450 | rx->rcb_setup_cbfn(bnad, q0->rcb); | |
2451 | ||
2452 | /* Setup small Q */ | |
2453 | ||
2454 | if (q1) { | |
2455 | q1->rx = rx; | |
2456 | q1->rxp = rxp; | |
2457 | ||
2458 | q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva; | |
2459 | q1->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva; | |
2460 | rcb_idx++; | |
2461 | q1->rcb->q_depth = rx_cfg->q_depth; | |
2462 | q1->rcb->rxq = q1; | |
2463 | q1->rcb->bnad = bna->bnad; | |
2464 | q1->rcb->id = 1; | |
2465 | q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ? | |
2466 | rx_cfg->hds_config.forced_offset | |
2467 | : rx_cfg->small_buff_size; | |
2468 | q1->rx_packets = q1->rx_bytes = 0; | |
2469 | q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0; | |
2470 | ||
2471 | bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE, | |
2472 | &hqpt_mem[i], &hsqpt_mem[i], | |
2473 | &hpage_mem[hpage_idx]); | |
2474 | q1->rcb->page_idx = hpage_idx; | |
2475 | q1->rcb->page_count = hpage_count; | |
2476 | hpage_idx += hpage_count; | |
2477 | ||
2478 | if (rx->rcb_setup_cbfn) | |
2479 | rx->rcb_setup_cbfn(bnad, q1->rcb); | |
2480 | } | |
2481 | ||
2482 | /* Setup CQ */ | |
2483 | ||
2484 | rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva; | |
2485 | rxp->cq.ccb->q_depth = rx_cfg->q_depth + | |
2486 | ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ? | |
2487 | 0 : rx_cfg->q_depth); | |
2488 | rxp->cq.ccb->cq = &rxp->cq; | |
2489 | rxp->cq.ccb->rcb[0] = q0->rcb; | |
2490 | q0->rcb->ccb = rxp->cq.ccb; | |
2491 | if (q1) { | |
2492 | rxp->cq.ccb->rcb[1] = q1->rcb; | |
2493 | q1->rcb->ccb = rxp->cq.ccb; | |
2494 | } | |
2495 | rxp->cq.ccb->hw_producer_index = | |
2496 | (u32 *)rxp->cq.ib.ib_seg_host_addr_kva; | |
2497 | rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell; | |
2498 | rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type; | |
2499 | rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector; | |
2500 | rxp->cq.ccb->rx_coalescing_timeo = | |
2501 | rxp->cq.ib.coalescing_timeo; | |
2502 | rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0; | |
2503 | rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0; | |
2504 | rxp->cq.ccb->bnad = bna->bnad; | |
2505 | rxp->cq.ccb->id = i; | |
2506 | ||
2507 | bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE, | |
2508 | &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[cpage_idx]); | |
2509 | rxp->cq.ccb->page_idx = cpage_idx; | |
2510 | rxp->cq.ccb->page_count = page_count; | |
2511 | cpage_idx += page_count; | |
2512 | ||
2513 | if (rx->ccb_setup_cbfn) | |
2514 | rx->ccb_setup_cbfn(bnad, rxp->cq.ccb); | |
2515 | } | |
2516 | ||
2517 | rx->hds_cfg = rx_cfg->hds_config; | |
2518 | ||
2519 | bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info); | |
2520 | ||
2521 | bfa_fsm_set_state(rx, bna_rx_sm_stopped); | |
2522 | ||
2523 | rx_mod->rid_mask |= (1 << rx->rid); | |
2524 | ||
2525 | return rx; | |
2526 | } | |
2527 | ||
2528 | void | |
2529 | bna_rx_destroy(struct bna_rx *rx) | |
2530 | { | |
2531 | struct bna_rx_mod *rx_mod = &rx->bna->rx_mod; | |
2532 | struct bna_rxq *q0 = NULL; | |
2533 | struct bna_rxq *q1 = NULL; | |
2534 | struct bna_rxp *rxp; | |
2535 | struct list_head *qe; | |
2536 | ||
2537 | bna_rxf_uninit(&rx->rxf); | |
2538 | ||
2539 | while (!list_empty(&rx->rxp_q)) { | |
2540 | bfa_q_deq(&rx->rxp_q, &rxp); | |
2541 | GET_RXQS(rxp, q0, q1); | |
2542 | if (rx->rcb_destroy_cbfn) | |
2543 | rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb); | |
2544 | q0->rcb = NULL; | |
2545 | q0->rxp = NULL; | |
2546 | q0->rx = NULL; | |
2547 | bna_rxq_put(rx_mod, q0); | |
2548 | ||
2549 | if (q1) { | |
2550 | if (rx->rcb_destroy_cbfn) | |
2551 | rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb); | |
2552 | q1->rcb = NULL; | |
2553 | q1->rxp = NULL; | |
2554 | q1->rx = NULL; | |
2555 | bna_rxq_put(rx_mod, q1); | |
2556 | } | |
2557 | rxp->rxq.slr.large = NULL; | |
2558 | rxp->rxq.slr.small = NULL; | |
2559 | ||
2560 | if (rx->ccb_destroy_cbfn) | |
2561 | rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb); | |
2562 | rxp->cq.ccb = NULL; | |
2563 | rxp->rx = NULL; | |
2564 | bna_rxp_put(rx_mod, rxp); | |
2565 | } | |
2566 | ||
2567 | list_for_each(qe, &rx_mod->rx_active_q) { | |
2568 | if (qe == &rx->qe) { | |
2569 | list_del(&rx->qe); | |
2570 | bfa_q_qe_init(&rx->qe); | |
2571 | break; | |
2572 | } | |
2573 | } | |
2574 | ||
2575 | rx_mod->rid_mask &= ~(1 << rx->rid); | |
2576 | ||
2577 | rx->bna = NULL; | |
2578 | rx->priv = NULL; | |
2579 | bna_rx_put(rx_mod, rx); | |
2580 | } | |
2581 | ||
2582 | void | |
2583 | bna_rx_enable(struct bna_rx *rx) | |
2584 | { | |
2585 | if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped) | |
2586 | return; | |
2587 | ||
2588 | rx->rx_flags |= BNA_RX_F_ENABLED; | |
2589 | if (rx->rx_flags & BNA_RX_F_ENET_STARTED) | |
2590 | bfa_fsm_send_event(rx, RX_E_START); | |
2591 | } | |
2592 | ||
2593 | void | |
2594 | bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type, | |
2595 | void (*cbfn)(void *, struct bna_rx *)) | |
2596 | { | |
2597 | if (type == BNA_SOFT_CLEANUP) { | |
2598 | /* h/w should not be accessed. Treat it as if we are stopped */ | |
2599 | (*cbfn)(rx->bna->bnad, rx); | |
2600 | } else { | |
2601 | rx->stop_cbfn = cbfn; | |
2602 | rx->stop_cbarg = rx->bna->bnad; | |
2603 | ||
2604 | rx->rx_flags &= ~BNA_RX_F_ENABLED; | |
2605 | ||
2606 | bfa_fsm_send_event(rx, RX_E_STOP); | |
2607 | } | |
2608 | } | |
2609 | ||
2610 | void | |
2611 | bna_rx_cleanup_complete(struct bna_rx *rx) | |
2612 | { | |
2613 | bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE); | |
2614 | } | |
2615 | ||
2616 | enum bna_cb_status | |
2617 | bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode, | |
2618 | enum bna_rxmode bitmask, | |
2619 | void (*cbfn)(struct bnad *, struct bna_rx *)) | |
2620 | { | |
2621 | struct bna_rxf *rxf = &rx->rxf; | |
2622 | int need_hw_config = 0; | |
2623 | ||
2624 | /* Error checks */ | |
2625 | ||
2626 | if (is_promisc_enable(new_mode, bitmask)) { | |
2627 | /* If promisc mode is already enabled elsewhere in the system */ | |
2628 | if ((rx->bna->promisc_rid != BFI_INVALID_RID) && | |
2629 | (rx->bna->promisc_rid != rxf->rx->rid)) | |
2630 | goto err_return; | |
2631 | ||
2632 | /* If default mode is already enabled in the system */ | |
2633 | if (rx->bna->default_mode_rid != BFI_INVALID_RID) | |
2634 | goto err_return; | |
2635 | ||
2636 | /* Trying to enable promiscuous and default mode together */ | |
2637 | if (is_default_enable(new_mode, bitmask)) | |
2638 | goto err_return; | |
2639 | } | |
2640 | ||
2641 | if (is_default_enable(new_mode, bitmask)) { | |
2642 | /* If default mode is already enabled elsewhere in the system */ | |
2643 | if ((rx->bna->default_mode_rid != BFI_INVALID_RID) && | |
2644 | (rx->bna->default_mode_rid != rxf->rx->rid)) { | |
2645 | goto err_return; | |
2646 | } | |
2647 | ||
2648 | /* If promiscuous mode is already enabled in the system */ | |
2649 | if (rx->bna->promisc_rid != BFI_INVALID_RID) | |
2650 | goto err_return; | |
2651 | } | |
2652 | ||
2653 | /* Process the commands */ | |
2654 | ||
2655 | if (is_promisc_enable(new_mode, bitmask)) { | |
2656 | if (bna_rxf_promisc_enable(rxf)) | |
2657 | need_hw_config = 1; | |
2658 | } else if (is_promisc_disable(new_mode, bitmask)) { | |
2659 | if (bna_rxf_promisc_disable(rxf)) | |
2660 | need_hw_config = 1; | |
2661 | } | |
2662 | ||
2663 | if (is_allmulti_enable(new_mode, bitmask)) { | |
2664 | if (bna_rxf_allmulti_enable(rxf)) | |
2665 | need_hw_config = 1; | |
2666 | } else if (is_allmulti_disable(new_mode, bitmask)) { | |
2667 | if (bna_rxf_allmulti_disable(rxf)) | |
2668 | need_hw_config = 1; | |
2669 | } | |
2670 | ||
2671 | /* Trigger h/w if needed */ | |
2672 | ||
2673 | if (need_hw_config) { | |
2674 | rxf->cam_fltr_cbfn = cbfn; | |
2675 | rxf->cam_fltr_cbarg = rx->bna->bnad; | |
2676 | bfa_fsm_send_event(rxf, RXF_E_CONFIG); | |
2677 | } else if (cbfn) | |
2678 | (*cbfn)(rx->bna->bnad, rx); | |
2679 | ||
2680 | return BNA_CB_SUCCESS; | |
2681 | ||
2682 | err_return: | |
2683 | return BNA_CB_FAIL; | |
2684 | } | |
2685 | ||
2686 | void | |
2687 | bna_rx_vlanfilter_enable(struct bna_rx *rx) | |
2688 | { | |
2689 | struct bna_rxf *rxf = &rx->rxf; | |
2690 | ||
2691 | if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) { | |
2692 | rxf->vlan_filter_status = BNA_STATUS_T_ENABLED; | |
2693 | rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL; | |
2694 | bfa_fsm_send_event(rxf, RXF_E_CONFIG); | |
2695 | } | |
2696 | } | |
2697 | ||
2698 | void | |
2699 | bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo) | |
2700 | { | |
2701 | struct bna_rxp *rxp; | |
2702 | struct list_head *qe; | |
2703 | ||
2704 | list_for_each(qe, &rx->rxp_q) { | |
2705 | rxp = (struct bna_rxp *)qe; | |
2706 | rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo; | |
2707 | bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo); | |
2708 | } | |
2709 | } | |
2710 | ||
2711 | void | |
2712 | bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]) | |
2713 | { | |
2714 | int i, j; | |
2715 | ||
2716 | for (i = 0; i < BNA_LOAD_T_MAX; i++) | |
2717 | for (j = 0; j < BNA_BIAS_T_MAX; j++) | |
2718 | bna->rx_mod.dim_vector[i][j] = vector[i][j]; | |
2719 | } | |
2720 | ||
2721 | void | |
2722 | bna_rx_dim_update(struct bna_ccb *ccb) | |
2723 | { | |
2724 | struct bna *bna = ccb->cq->rx->bna; | |
2725 | u32 load, bias; | |
2726 | u32 pkt_rt, small_rt, large_rt; | |
2727 | u8 coalescing_timeo; | |
2728 | ||
2729 | if ((ccb->pkt_rate.small_pkt_cnt == 0) && | |
2730 | (ccb->pkt_rate.large_pkt_cnt == 0)) | |
2731 | return; | |
2732 | ||
2733 | /* Arrive at preconfigured coalescing timeo value based on pkt rate */ | |
2734 | ||
2735 | small_rt = ccb->pkt_rate.small_pkt_cnt; | |
2736 | large_rt = ccb->pkt_rate.large_pkt_cnt; | |
2737 | ||
2738 | pkt_rt = small_rt + large_rt; | |
2739 | ||
2740 | if (pkt_rt < BNA_PKT_RATE_10K) | |
2741 | load = BNA_LOAD_T_LOW_4; | |
2742 | else if (pkt_rt < BNA_PKT_RATE_20K) | |
2743 | load = BNA_LOAD_T_LOW_3; | |
2744 | else if (pkt_rt < BNA_PKT_RATE_30K) | |
2745 | load = BNA_LOAD_T_LOW_2; | |
2746 | else if (pkt_rt < BNA_PKT_RATE_40K) | |
2747 | load = BNA_LOAD_T_LOW_1; | |
2748 | else if (pkt_rt < BNA_PKT_RATE_50K) | |
2749 | load = BNA_LOAD_T_HIGH_1; | |
2750 | else if (pkt_rt < BNA_PKT_RATE_60K) | |
2751 | load = BNA_LOAD_T_HIGH_2; | |
2752 | else if (pkt_rt < BNA_PKT_RATE_80K) | |
2753 | load = BNA_LOAD_T_HIGH_3; | |
2754 | else | |
2755 | load = BNA_LOAD_T_HIGH_4; | |
2756 | ||
2757 | if (small_rt > (large_rt << 1)) | |
2758 | bias = 0; | |
2759 | else | |
2760 | bias = 1; | |
2761 | ||
2762 | ccb->pkt_rate.small_pkt_cnt = 0; | |
2763 | ccb->pkt_rate.large_pkt_cnt = 0; | |
2764 | ||
2765 | coalescing_timeo = bna->rx_mod.dim_vector[load][bias]; | |
2766 | ccb->rx_coalescing_timeo = coalescing_timeo; | |
2767 | ||
2768 | /* Set it to IB */ | |
2769 | bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo); | |
2770 | } | |
2771 | ||
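| /* Coalescing timeout lookup table for dynamic interrupt moderation; | |
| * rows are indexed by load level and columns by the small-vs-large | |
| * packet bias computed in bna_rx_dim_update(). | |
| */ | |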
2772 | const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = { | |
2773 | {12, 12}, | |
2774 | {6, 10}, | |
2775 | {5, 10}, | |
2776 | {4, 8}, | |
2777 | {3, 6}, | |
2778 | {3, 6}, | |
2779 | {2, 4}, | |
2780 | {1, 2}, | |
2781 | }; | |
2782 | ||
2783 | /** | |
2784 | * TX | |
2785 | */ | |
2786 | #define call_tx_stop_cbfn(tx) \ | |
2787 | do { \ | |
2788 | if ((tx)->stop_cbfn) { \ | |
2789 | void (*cbfn)(void *, struct bna_tx *); \ | |
2790 | void *cbarg; \ | |
2791 | cbfn = (tx)->stop_cbfn; \ | |
2792 | cbarg = (tx)->stop_cbarg; \ | |
2793 | (tx)->stop_cbfn = NULL; \ | |
2794 | (tx)->stop_cbarg = NULL; \ | |
2795 | cbfn(cbarg, (tx)); \ | |
2796 | } \ | |
2797 | } while (0) | |
2798 | ||
2799 | #define call_tx_prio_change_cbfn(tx) \ | |
2800 | do { \ | |
2801 | if ((tx)->prio_change_cbfn) { \ | |
2802 | void (*cbfn)(struct bnad *, struct bna_tx *); \ | |
2803 | cbfn = (tx)->prio_change_cbfn; \ | |
2804 | (tx)->prio_change_cbfn = NULL; \ | |
2805 | cbfn((tx)->bna->bnad, (tx)); \ | |
2806 | } \ | |
2807 | } while (0) | |
2808 | ||
2809 | static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx); | |
2810 | static void bna_bfi_tx_enet_start(struct bna_tx *tx); | |
2811 | static void bna_tx_enet_stop(struct bna_tx *tx); | |
2812 | ||
2813 | enum bna_tx_event { | |
2814 | TX_E_START = 1, | |
2815 | TX_E_STOP = 2, | |
2816 | TX_E_FAIL = 3, | |
2817 | TX_E_STARTED = 4, | |
2818 | TX_E_STOPPED = 5, | |
2819 | TX_E_PRIO_CHANGE = 6, | |
2820 | TX_E_CLEANUP_DONE = 7, | |
2821 | TX_E_BW_UPDATE = 8, | |
2822 | }; | |
2823 | ||
2824 | bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event); | |
2825 | bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event); | |
2826 | bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event); | |
2827 | bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event); | |
2828 | bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx, | |
2829 | enum bna_tx_event); | |
2830 | bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx, | |
2831 | enum bna_tx_event); | |
2832 | bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx, | |
2833 | enum bna_tx_event); | |
2834 | bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event); | |
2835 | bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx, | |
2836 | enum bna_tx_event); | |
2837 | ||
2838 | static void | |
2839 | bna_tx_sm_stopped_entry(struct bna_tx *tx) | |
2840 | { | |
2841 | call_tx_stop_cbfn(tx); | |
2842 | } | |
2843 | ||
2844 | static void | |
2845 | bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event) | |
2846 | { | |
2847 | switch (event) { | |
2848 | case TX_E_START: | |
2849 | bfa_fsm_set_state(tx, bna_tx_sm_start_wait); | |
2850 | break; | |
2851 | ||
2852 | case TX_E_STOP: | |
2853 | call_tx_stop_cbfn(tx); | |
2854 | break; | |
2855 | ||
2856 | case TX_E_FAIL: | |
2857 | /* No-op */ | |
2858 | break; | |
2859 | ||
2860 | case TX_E_PRIO_CHANGE: | |
2861 | call_tx_prio_change_cbfn(tx); | |
2862 | break; | |
2863 | ||
2864 | case TX_E_BW_UPDATE: | |
2865 | /* No-op */ | |
2866 | break; | |
2867 | ||
2868 | default: | |
2869 | bfa_sm_fault(event); | |
2870 | } | |
2871 | } | |
2872 | ||
2873 | static void | |
2874 | bna_tx_sm_start_wait_entry(struct bna_tx *tx) | |
2875 | { | |
2876 | bna_bfi_tx_enet_start(tx); | |
2877 | } | |
2878 | ||
2879 | static void | |
2880 | bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event) | |
2881 | { | |
2882 | switch (event) { | |
2883 | case TX_E_STOP: | |
2884 | tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED); | |
2885 | bfa_fsm_set_state(tx, bna_tx_sm_stop_wait); | |
2886 | break; | |
2887 | ||
2888 | case TX_E_FAIL: | |
2889 | tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED); | |
2890 | bfa_fsm_set_state(tx, bna_tx_sm_stopped); | |
2891 | break; | |
2892 | ||
2893 | case TX_E_STARTED: | |
2894 | if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) { | |
2895 | tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | | |
2896 | BNA_TX_F_BW_UPDATED); | |
2897 | bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait); | |
2898 | } else | |
2899 | bfa_fsm_set_state(tx, bna_tx_sm_started); | |
2900 | break; | |
2901 | ||
2902 | case TX_E_PRIO_CHANGE: | |
2903 | tx->flags |= BNA_TX_F_PRIO_CHANGED; | |
2904 | break; | |
2905 | ||
2906 | case TX_E_BW_UPDATE: | |
2907 | tx->flags |= BNA_TX_F_BW_UPDATED; | |
2908 | break; | |
2909 | ||
2910 | default: | |
2911 | bfa_sm_fault(event); | |
2912 | } | |
2913 | } | |
2914 | ||
2915 | static void | |
2916 | bna_tx_sm_started_entry(struct bna_tx *tx) | |
2917 | { | |
2918 | struct bna_txq *txq; | |
2919 | struct list_head *qe; | |
2920 | int is_regular = (tx->type == BNA_TX_T_REGULAR); | |
2921 | ||
2922 | list_for_each(qe, &tx->txq_q) { | |
2923 | txq = (struct bna_txq *)qe; | |
2924 | txq->tcb->priority = txq->priority; | |
2925 | /* Start IB */ | |
2926 | bna_ib_start(tx->bna, &txq->ib, is_regular); | |
2927 | } | |
2928 | tx->tx_resume_cbfn(tx->bna->bnad, tx); | |
2929 | } | |
2930 | ||
2931 | static void | |
2932 | bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event) | |
2933 | { | |
2934 | switch (event) { | |
2935 | case TX_E_STOP: | |
2936 | bfa_fsm_set_state(tx, bna_tx_sm_stop_wait); | |
2937 | tx->tx_stall_cbfn(tx->bna->bnad, tx); | |
2938 | bna_tx_enet_stop(tx); | |
2939 | break; | |
2940 | ||
2941 | case TX_E_FAIL: | |
2942 | bfa_fsm_set_state(tx, bna_tx_sm_failed); | |
2943 | tx->tx_stall_cbfn(tx->bna->bnad, tx); | |
2944 | tx->tx_cleanup_cbfn(tx->bna->bnad, tx); | |
2945 | break; | |
2946 | ||
2947 | case TX_E_PRIO_CHANGE: | |
2948 | case TX_E_BW_UPDATE: | |
2949 | bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait); | |
2950 | break; | |
2951 | ||
2952 | default: | |
2953 | bfa_sm_fault(event); | |
2954 | } | |
2955 | } | |
2956 | ||
2957 | static void | |
2958 | bna_tx_sm_stop_wait_entry(struct bna_tx *tx) | |
2959 | { | |
2960 | } | |
2961 | ||
2962 | static void | |
2963 | bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event) | |
2964 | { | |
2965 | switch (event) { | |
2966 | case TX_E_FAIL: | |
2967 | case TX_E_STOPPED: | |
2968 | bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); | |
2969 | tx->tx_cleanup_cbfn(tx->bna->bnad, tx); | |
2970 | break; | |
2971 | ||
2972 | case TX_E_STARTED: | |
2973 | /** | |
2974 | * We are here due to start_wait -> stop_wait transition on | |
2975 | * TX_E_STOP event | |
2976 | */ | |
2977 | bna_tx_enet_stop(tx); | |
2978 | break; | |
2979 | ||
2980 | case TX_E_PRIO_CHANGE: | |
2981 | case TX_E_BW_UPDATE: | |
2982 | /* No-op */ | |
2983 | break; | |
2984 | ||
2985 | default: | |
2986 | bfa_sm_fault(event); | |
2987 | } | |
2988 | } | |
2989 | ||
2990 | static void | |
2991 | bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx) | |
2992 | { | |
2993 | } | |
2994 | ||
2995 | static void | |
2996 | bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event) | |
2997 | { | |
2998 | switch (event) { | |
2999 | case TX_E_FAIL: | |
3000 | case TX_E_PRIO_CHANGE: | |
3001 | case TX_E_BW_UPDATE: | |
3002 | /* No-op */ | |
3003 | break; | |
3004 | ||
3005 | case TX_E_CLEANUP_DONE: | |
3006 | bfa_fsm_set_state(tx, bna_tx_sm_stopped); | |
3007 | break; | |
3008 | ||
3009 | default: | |
3010 | bfa_sm_fault(event); | |
3011 | } | |
3012 | } | |
3013 | ||
3014 | static void | |
3015 | bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx) | |
3016 | { | |
3017 | tx->tx_stall_cbfn(tx->bna->bnad, tx); | |
3018 | bna_tx_enet_stop(tx); | |
3019 | } | |
3020 | ||
3021 | static void | |
3022 | bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event) | |
3023 | { | |
3024 | switch (event) { | |
3025 | case TX_E_STOP: | |
3026 | bfa_fsm_set_state(tx, bna_tx_sm_stop_wait); | |
3027 | break; | |
3028 | ||
3029 | case TX_E_FAIL: | |
3030 | bfa_fsm_set_state(tx, bna_tx_sm_failed); | |
3031 | call_tx_prio_change_cbfn(tx); | |
3032 | tx->tx_cleanup_cbfn(tx->bna->bnad, tx); | |
3033 | break; | |
3034 | ||
3035 | case TX_E_STOPPED: | |
3036 | bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait); | |
3037 | break; | |
3038 | ||
3039 | case TX_E_PRIO_CHANGE: | |
3040 | case TX_E_BW_UPDATE: | |
3041 | /* No-op */ | |
3042 | break; | |
3043 | ||
3044 | default: | |
3045 | bfa_sm_fault(event); | |
3046 | } | |
3047 | } | |
3048 | ||
3049 | static void | |
3050 | bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx) | |
3051 | { | |
3052 | call_tx_prio_change_cbfn(tx); | |
3053 | tx->tx_cleanup_cbfn(tx->bna->bnad, tx); | |
3054 | } | |
3055 | ||
3056 | static void | |
3057 | bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event) | |
3058 | { | |
3059 | switch (event) { | |
3060 | case TX_E_STOP: | |
3061 | bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); | |
3062 | break; | |
3063 | ||
3064 | case TX_E_FAIL: | |
3065 | bfa_fsm_set_state(tx, bna_tx_sm_failed); | |
3066 | break; | |
3067 | ||
3068 | case TX_E_PRIO_CHANGE: | |
3069 | case TX_E_BW_UPDATE: | |
3070 | /* No-op */ | |
3071 | break; | |
3072 | ||
3073 | case TX_E_CLEANUP_DONE: | |
3074 | bfa_fsm_set_state(tx, bna_tx_sm_start_wait); | |
3075 | break; | |
3076 | ||
3077 | default: | |
3078 | bfa_sm_fault(event); | |
3079 | } | |
3080 | } | |
3081 | ||
3082 | static void | |
3083 | bna_tx_sm_failed_entry(struct bna_tx *tx) | |
3084 | { | |
3085 | } | |
3086 | ||
3087 | static void | |
3088 | bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event) | |
3089 | { | |
3090 | switch (event) { | |
3091 | case TX_E_START: | |
3092 | bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait); | |
3093 | break; | |
3094 | ||
3095 | case TX_E_STOP: | |
3096 | bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); | |
3097 | break; | |
3098 | ||
3099 | case TX_E_FAIL: | |
3100 | /* No-op */ | |
3101 | break; | |
3102 | ||
3103 | case TX_E_CLEANUP_DONE: | |
3104 | bfa_fsm_set_state(tx, bna_tx_sm_stopped); | |
3105 | break; | |
3106 | ||
3107 | default: | |
3108 | bfa_sm_fault(event); | |
3109 | } | |
3110 | } | |
3111 | ||
3112 | static void | |
3113 | bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx) | |
3114 | { | |
3115 | } | |
3116 | ||
3117 | static void | |
3118 | bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event) | |
3119 | { | |
3120 | switch (event) { | |
3121 | case TX_E_STOP: | |
3122 | bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); | |
3123 | break; | |
3124 | ||
3125 | case TX_E_FAIL: | |
3126 | bfa_fsm_set_state(tx, bna_tx_sm_failed); | |
3127 | break; | |
3128 | ||
3129 | case TX_E_CLEANUP_DONE: | |
3130 | bfa_fsm_set_state(tx, bna_tx_sm_start_wait); | |
3131 | break; | |
3132 | ||
3133 | case TX_E_BW_UPDATE: | |
3134 | /* No-op */ | |
3135 | break; | |
3136 | ||
3137 | default: | |
3138 | bfa_sm_fault(event); | |
3139 | } | |
3140 | } | |
3141 | ||
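| /* Build a BFI_ENET_H2I_TX_CFG_SET_REQ covering all TxQs and their | |
| * IBs and post it to the firmware message queue. | |
| */ | |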
3142 | static void | |
3143 | bna_bfi_tx_enet_start(struct bna_tx *tx) | |
3144 | { | |
3145 | struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req; | |
3146 | struct bna_txq *txq = NULL; | |
3147 | struct list_head *qe; | |
3148 | int i; | |
3149 | ||
3150 | bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET, | |
3151 | BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid); | |
3152 | cfg_req->mh.num_entries = htons( | |
3153 | bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req))); | |
3154 | ||
3155 | cfg_req->num_queues = tx->num_txq; | |
3156 | for (i = 0, qe = bfa_q_first(&tx->txq_q); | |
3157 | i < tx->num_txq; | |
3158 | i++, qe = bfa_q_next(qe)) { | |
3159 | txq = (struct bna_txq *)qe; | |
3160 | ||
3161 | bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt); | |
3162 | cfg_req->q_cfg[i].q.priority = txq->priority; | |
3163 | ||
3164 | cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo = | |
3165 | txq->ib.ib_seg_host_addr.lsb; | |
3166 | cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi = | |
3167 | txq->ib.ib_seg_host_addr.msb; | |
3168 | cfg_req->q_cfg[i].ib.intr.msix_index = | |
3169 | htons((u16)txq->ib.intr_vector); | |
3170 | } | |
3171 | ||
3172 | cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED; | |
3173 | cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED; | |
3174 | cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED; | |
3175 | cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED; | |
3176 | cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX) | |
3177 | ? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED; | |
3178 | cfg_req->ib_cfg.coalescing_timeout = | |
3179 | htonl((u32)txq->ib.coalescing_timeo); | |
3180 | cfg_req->ib_cfg.inter_pkt_timeout = | |
3181 | htonl((u32)txq->ib.interpkt_timeo); | |
3182 | cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count; | |
3183 | ||
3184 | cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI; | |
3185 | cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id); | |
3186 | cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_DISABLED; | |
3187 | cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED; | |
3188 | ||
3189 | bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, | |
3190 | sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh); | |
3191 | bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd); | |
3192 | } | |
3193 | ||
3194 | static void | |
3195 | bna_bfi_tx_enet_stop(struct bna_tx *tx) | |
3196 | { | |
3197 | struct bfi_enet_req *req = &tx->bfi_enet_cmd.req; | |
3198 | ||
3199 | bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, | |
3200 | BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid); | |
3201 | req->mh.num_entries = htons( | |
3202 | bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req))); | |
3203 | bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req), | |
3204 | &req->mh); | |
3205 | bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd); | |
3206 | } | |
3207 | ||
3208 | static void | |
3209 | bna_tx_enet_stop(struct bna_tx *tx) | |
3210 | { | |
3211 | struct bna_txq *txq; | |
3212 | struct list_head *qe; | |
3213 | ||
3214 | /* Stop IB */ | |
3215 | list_for_each(qe, &tx->txq_q) { | |
3216 | txq = (struct bna_txq *)qe; | |
3217 | bna_ib_stop(tx->bna, &txq->ib); | |
3218 | } | |
3219 | ||
3220 | bna_bfi_tx_enet_stop(tx); | |
3221 | } | |
3222 | ||
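| /* TX counterpart of bna_rxq_qpt_setup(): populate the hardware and | |
| * software queue page tables for a TxQ. | |
| */ | |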
3223 | static void | |
3224 | bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size, | |
3225 | struct bna_mem_descr *qpt_mem, | |
3226 | struct bna_mem_descr *swqpt_mem, | |
3227 | struct bna_mem_descr *page_mem) | |
3228 | { | |
3229 | int i; | |
3230 | ||
3231 | txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; | |
3232 | txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb; | |
3233 | txq->qpt.kv_qpt_ptr = qpt_mem->kva; | |
3234 | txq->qpt.page_count = page_count; | |
3235 | txq->qpt.page_size = page_size; | |
3236 | ||
3237 | txq->tcb->sw_qpt = (void **) swqpt_mem->kva; | |
3238 | ||
3239 | for (i = 0; i < page_count; i++) { | |
3240 | txq->tcb->sw_qpt[i] = page_mem[i].kva; | |
3241 | ||
3242 | ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb = | |
3243 | page_mem[i].dma.lsb; | |
3244 | ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb = | |
3245 | page_mem[i].dma.msb; | |
3246 | } | |
3247 | } | |
3248 | ||
3249 | static struct bna_tx * | |
3250 | bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type) | |
3251 | { | |
3252 | struct list_head *qe = NULL; | |
3253 | struct bna_tx *tx = NULL; | |
3254 | ||
3255 | if (list_empty(&tx_mod->tx_free_q)) | |
3256 | return NULL; | |
3257 | if (type == BNA_TX_T_REGULAR) { | |
3258 | bfa_q_deq(&tx_mod->tx_free_q, &qe); | |
3259 | } else { | |
3260 | bfa_q_deq_tail(&tx_mod->tx_free_q, &qe); | |
3261 | } | |
3262 | tx = (struct bna_tx *)qe; | |
3263 | bfa_q_qe_init(&tx->qe); | |
3264 | tx->type = type; | |
3265 | ||
3266 | return tx; | |
3267 | } | |
3268 | ||
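| /* Release the TXQs back to the free list, unlink the TX from the | |
| * active list and re-insert it into the free list sorted by rid. | |
| */ | |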
3269 | static void | |
3270 | bna_tx_free(struct bna_tx *tx) | |
3271 | { | |
3272 | struct bna_tx_mod *tx_mod = &tx->bna->tx_mod; | |
3273 | struct bna_txq *txq; | |
3274 | struct list_head *prev_qe; | |
3275 | struct list_head *qe; | |
3276 | ||
3277 | while (!list_empty(&tx->txq_q)) { | |
3278 | bfa_q_deq(&tx->txq_q, &txq); | |
3279 | bfa_q_qe_init(&txq->qe); | |
3280 | txq->tcb = NULL; | |
3281 | txq->tx = NULL; | |
3282 | list_add_tail(&txq->qe, &tx_mod->txq_free_q); | |
3283 | } | |
3284 | ||
3285 | list_for_each(qe, &tx_mod->tx_active_q) { | |
3286 | if (qe == &tx->qe) { | |
3287 | list_del(&tx->qe); | |
3288 | bfa_q_qe_init(&tx->qe); | |
3289 | break; | |
3290 | } | |
3291 | } | |
3292 | ||
3293 | tx->bna = NULL; | |
3294 | tx->priv = NULL; | |
3295 | ||
3296 | prev_qe = NULL; | |
3297 | list_for_each(qe, &tx_mod->tx_free_q) { | |
3298 | if (((struct bna_tx *)qe)->rid < tx->rid) | |
3299 | prev_qe = qe; | |
3300 | else | |
3301 | break; | |
3303 | } | |
3304 | ||
3305 | if (prev_qe == NULL) { | |
3306 | /* This is the first entry */ | |
3307 | bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe); | |
3308 | } else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) { | |
3309 | /* This is the last entry */ | |
3310 | list_add_tail(&tx->qe, &tx_mod->tx_free_q); | |
3311 | } else { | |
3312 | /* Somewhere in the middle */ | |
3313 | bfa_q_next(&tx->qe) = bfa_q_next(prev_qe); | |
3314 | bfa_q_prev(&tx->qe) = prev_qe; | |
3315 | bfa_q_next(prev_qe) = &tx->qe; | |
3316 | bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe; | |
3317 | } | |
3318 | } | |
3319 | ||
3320 | static void | |
3321 | bna_tx_start(struct bna_tx *tx) | |
3322 | { | |
3323 | tx->flags |= BNA_TX_F_ENET_STARTED; | |
3324 | if (tx->flags & BNA_TX_F_ENABLED) | |
3325 | bfa_fsm_send_event(tx, TX_E_START); | |
3326 | } | |
3327 | ||
3328 | static void | |
3329 | bna_tx_stop(struct bna_tx *tx) | |
3330 | { | |
3331 | tx->stop_cbfn = bna_tx_mod_cb_tx_stopped; | |
3332 | tx->stop_cbarg = &tx->bna->tx_mod; | |
3333 | ||
3334 | tx->flags &= ~BNA_TX_F_ENET_STARTED; | |
3335 | bfa_fsm_send_event(tx, TX_E_STOP); | |
3336 | } | |
3337 | ||
3338 | static void | |
3339 | bna_tx_fail(struct bna_tx *tx) | |
3340 | { | |
3341 | tx->flags &= ~BNA_TX_F_ENET_STARTED; | |
3342 | bfa_fsm_send_event(tx, TX_E_FAIL); | |
3343 | } | |
3344 | ||
3345 | void | |
3346 | bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr) | |
3347 | { | |
3348 | struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp; | |
3349 | struct bna_txq *txq = NULL; | |
3350 | struct list_head *qe; | |
3351 | int i; | |
3352 | ||
3353 | bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp, | |
3354 | sizeof(struct bfi_enet_tx_cfg_rsp)); | |
3355 | ||
3356 | tx->hw_id = cfg_rsp->hw_id; | |
3357 | ||
3358 | for (i = 0, qe = bfa_q_first(&tx->txq_q); | |
3359 | i < tx->num_txq; i++, qe = bfa_q_next(qe)) { | |
3360 | txq = (struct bna_txq *)qe; | |
3361 | ||
3362 | /* Setup doorbells */ | |
3363 | txq->tcb->i_dbell->doorbell_addr = | |
3364 | tx->bna->pcidev.pci_bar_kva | |
3365 | + ntohl(cfg_rsp->q_handles[i].i_dbell); | |
3366 | txq->tcb->q_dbell = | |
3367 | tx->bna->pcidev.pci_bar_kva | |
3368 | + ntohl(cfg_rsp->q_handles[i].q_dbell); | |
3369 | txq->hw_id = cfg_rsp->q_handles[i].hw_qid; | |
3370 | ||
3371 | /* Initialize producer/consumer indexes */ | |
3372 | (*txq->tcb->hw_consumer_index) = 0; | |
3373 | txq->tcb->producer_index = txq->tcb->consumer_index = 0; | |
3374 | } | |
3375 | ||
3376 | bfa_fsm_send_event(tx, TX_E_STARTED); | |
3377 | } | |
3378 | ||
3379 | void | |
3380 | bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr) | |
3381 | { | |
3382 | bfa_fsm_send_event(tx, TX_E_STOPPED); | |
3383 | } | |
3384 | ||
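| /* | |
| * Bandwidth-update async event: fan the notification out to | |
| * every active Tx FSM. | |
| */ | |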
3385 | void | |
3386 | bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod) | |
3387 | { | |
3388 | struct bna_tx *tx; | |
3389 | struct list_head *qe; | |
3390 | ||
3391 | list_for_each(qe, &tx_mod->tx_active_q) { | |
3392 | tx = (struct bna_tx *)qe; | |
3393 | bfa_fsm_send_event(tx, TX_E_BW_UPDATE); | |
3394 | } | |
3395 | } | |
3396 | ||
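| /* | |
| * Describe the memory and interrupt resources a Tx of the given | |
| * shape needs: per-TxQ control blocks (kernel memory), the DMA | |
| * page-pointer table plus its kernel-side shadow, the queue | |
| * pages themselves, the IB index segments, and one MSI-X vector | |
| * per TxQ. | |
| */ | |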
3397 | void | |
3398 | bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info) | |
3399 | { | |
3400 | u32 q_size; | |
3401 | u32 page_count; | |
3402 | struct bna_mem_info *mem_info; | |
3403 | ||
3404 | res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM; | |
3405 | mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info; | |
3406 | mem_info->mem_type = BNA_MEM_T_KVA; | |
3407 | mem_info->len = sizeof(struct bna_tcb); | |
3408 | mem_info->num = num_txq; | |
3409 | ||
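| /* | |
| * Size one TxQ in bytes, round up to a whole number of pages and | |
| * derive the per-queue page count (e.g. with a hypothetical | |
| * 64-byte work item and 4 KiB pages, a 2048-entry queue needs | |
| * 128 KiB, i.e. 32 pages). | |
| */ | |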
3410 | q_size = txq_depth * BFI_TXQ_WI_SIZE; | |
3411 | q_size = ALIGN(q_size, PAGE_SIZE); | |
3412 | page_count = q_size >> PAGE_SHIFT; | |
3413 | ||
3414 | res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM; | |
3415 | mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info; | |
3416 | mem_info->mem_type = BNA_MEM_T_DMA; | |
3417 | mem_info->len = page_count * sizeof(struct bna_dma_addr); | |
3418 | mem_info->num = num_txq; | |
3419 | ||
3420 | res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM; | |
3421 | mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info; | |
3422 | mem_info->mem_type = BNA_MEM_T_KVA; | |
3423 | mem_info->len = page_count * sizeof(void *); | |
3424 | mem_info->num = num_txq; | |
3425 | ||
3426 | res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM; | |
3427 | mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info; | |
3428 | mem_info->mem_type = BNA_MEM_T_DMA; | |
3429 | mem_info->len = PAGE_SIZE; | |
3430 | mem_info->num = num_txq * page_count; | |
3431 | ||
3432 | res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM; | |
3433 | mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info; | |
3434 | mem_info->mem_type = BNA_MEM_T_DMA; | |
3435 | mem_info->len = BFI_IBIDX_SIZE; | |
3436 | mem_info->num = num_txq; | |
3437 | ||
3438 | res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR; | |
3439 | res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type = | |
3440 | BNA_INTR_T_MSIX; | |
3441 | res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq; | |
3442 | } | |
3443 | ||
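| /* | |
| * Allocate and wire up a Tx object: claim a Tx and its TxQs from | |
| * the free lists, install the driver callbacks, inherit the | |
| * started/loopback state from the module, set up the IB, TCB and | |
| * queue page tables for every TxQ, and leave the FSM stopped | |
| * with the rid marked busy in rid_mask. | |
| */ | |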
3444 | struct bna_tx * | |
3445 | bna_tx_create(struct bna *bna, struct bnad *bnad, | |
3446 | struct bna_tx_config *tx_cfg, | |
d91d25d5 | 3447 | const struct bna_tx_event_cbfn *tx_cbfn, |
f3bd5173 RM |
3448 | struct bna_res_info *res_info, void *priv) |
3449 | { | |
3450 | struct bna_intr_info *intr_info; | |
3451 | struct bna_tx_mod *tx_mod = &bna->tx_mod; | |
3452 | struct bna_tx *tx; | |
3453 | struct bna_txq *txq; | |
3454 | struct list_head *qe; | |
3455 | int page_count; | |
3456 | int page_size; | |
3457 | int page_idx; | |
3458 | int i; | |
3459 | ||
3460 | intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info; | |
3461 | page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.num) / | |
3462 | tx_cfg->num_txq; | |
3463 | page_size = res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len; | |
3464 | ||
3465 | /* Get resources */ | |
3468 | ||
3469 | if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq)) | |
3470 | return NULL; | |
3471 | ||
3472 | /* Tx */ | |
3473 | ||
3474 | tx = bna_tx_get(tx_mod, tx_cfg->tx_type); | |
3475 | if (!tx) | |
3476 | return NULL; | |
3477 | tx->bna = bna; | |
3478 | tx->priv = priv; | |
3479 | ||
3480 | /* TxQs */ | |
3481 | ||
3482 | INIT_LIST_HEAD(&tx->txq_q); | |
3483 | for (i = 0; i < tx_cfg->num_txq; i++) { | |
3484 | if (list_empty(&tx_mod->txq_free_q)) | |
3485 | goto err_return; | |
3486 | ||
3487 | bfa_q_deq(&tx_mod->txq_free_q, &txq); | |
3488 | bfa_q_qe_init(&txq->qe); | |
3489 | list_add_tail(&txq->qe, &tx->txq_q); | |
3490 | txq->tx = tx; | |
3491 | } | |
3492 | ||
3493 | /* | |
3494 | * Initialize | |
3495 | */ | |
3496 | ||
3497 | /* Tx */ | |
3498 | ||
3499 | tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn; | |
3500 | tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn; | |
3501 | /* The following callbacks are mandatory */ | |
3502 | tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn; | |
3503 | tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn; | |
3504 | tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn; | |
3505 | ||
3506 | list_add_tail(&tx->qe, &tx_mod->tx_active_q); | |
3507 | ||
3508 | tx->num_txq = tx_cfg->num_txq; | |
3509 | ||
3510 | tx->flags = 0; | |
3511 | if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) { | |
3512 | switch (tx->type) { | |
3513 | case BNA_TX_T_REGULAR: | |
3514 | if (!(tx->bna->tx_mod.flags & | |
3515 | BNA_TX_MOD_F_ENET_LOOPBACK)) | |
3516 | tx->flags |= BNA_TX_F_ENET_STARTED; | |
3517 | break; | |
3518 | case BNA_TX_T_LOOPBACK: | |
3519 | if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK) | |
3520 | tx->flags |= BNA_TX_F_ENET_STARTED; | |
3521 | break; | |
3522 | } | |
3523 | } | |
3524 | ||
3525 | /* TxQ */ | |
3526 | ||
3527 | i = 0; | |
3528 | page_idx = 0; | |
3529 | list_for_each(qe, &tx->txq_q) { | |
3530 | txq = (struct bna_txq *)qe; | |
3531 | txq->tcb = (struct bna_tcb *) | |
3532 | res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva; | |
3533 | txq->tx_packets = 0; | |
3534 | txq->tx_bytes = 0; | |
3535 | ||
3536 | /* IB */ | |
3537 | txq->ib.ib_seg_host_addr.lsb = | |
3538 | res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb; | |
3539 | txq->ib.ib_seg_host_addr.msb = | |
3540 | res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb; | |
3541 | txq->ib.ib_seg_host_addr_kva = | |
3542 | res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva; | |
3543 | txq->ib.intr_type = intr_info->intr_type; | |
3544 | txq->ib.intr_vector = (intr_info->num == 1) ? | |
3545 | intr_info->idl[0].vector : | |
3546 | intr_info->idl[i].vector; | |
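| /* For INTx the vector is carried as a bitmask rather than an index */ | |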
3547 | if (intr_info->intr_type == BNA_INTR_T_INTX) | |
3548 | txq->ib.intr_vector = (1 << txq->ib.intr_vector); | |
3549 | txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo; | |
3550 | txq->ib.interpkt_timeo = 0; /* Not used */ | |
3551 | txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT; | |
3552 | ||
3553 | /* TCB */ | |
3554 | ||
3555 | txq->tcb->q_depth = tx_cfg->txq_depth; | |
3556 | txq->tcb->unmap_q = (void *) | |
3557 | res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva; | |
3558 | txq->tcb->hw_consumer_index = | |
3559 | (u32 *)txq->ib.ib_seg_host_addr_kva; | |
3560 | txq->tcb->i_dbell = &txq->ib.door_bell; | |
3561 | txq->tcb->intr_type = txq->ib.intr_type; | |
3562 | txq->tcb->intr_vector = txq->ib.intr_vector; | |
3563 | txq->tcb->txq = txq; | |
3564 | txq->tcb->bnad = bnad; | |
3565 | txq->tcb->id = i; | |
3566 | ||
3567 | /* QPT, SWQPT, Pages */ | |
3568 | bna_txq_qpt_setup(txq, page_count, page_size, | |
3569 | &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i], | |
3570 | &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i], | |
3571 | &res_info[BNA_TX_RES_MEM_T_PAGE]. | |
3572 | res_u.mem_info.mdl[page_idx]); | |
3573 | txq->tcb->page_idx = page_idx; | |
3574 | txq->tcb->page_count = page_count; | |
3575 | page_idx += page_count; | |
3576 | ||
3577 | /* Callback to bnad for setting up TCB */ | |
3578 | if (tx->tcb_setup_cbfn) | |
3579 | (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb); | |
3580 | ||
3581 | if (tx_cfg->num_txq == BFI_TX_MAX_PRIO) | |
3582 | txq->priority = txq->tcb->id; | |
3583 | else | |
3584 | txq->priority = tx_mod->default_prio; | |
3585 | ||
3586 | i++; | |
3587 | } | |
3588 | ||
3589 | tx->txf_vlan_id = 0; | |
3590 | ||
3591 | bfa_fsm_set_state(tx, bna_tx_sm_stopped); | |
3592 | ||
3593 | tx_mod->rid_mask |= (1 << tx->rid); | |
3594 | ||
3595 | return tx; | |
3596 | ||
3597 | err_return: | |
3598 | bna_tx_free(tx); | |
3599 | return NULL; | |
3600 | } | |
3601 | ||
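| /* | |
| * Tear down a Tx: let the driver release each TCB, clear the rid | |
| * from the busy mask and return the object to the free lists. | |
| */ | |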
3602 | void | |
3603 | bna_tx_destroy(struct bna_tx *tx) | |
3604 | { | |
3605 | struct bna_txq *txq; | |
3606 | struct list_head *qe; | |
3607 | ||
3608 | list_for_each(qe, &tx->txq_q) { | |
3609 | txq = (struct bna_txq *)qe; | |
3610 | if (tx->tcb_destroy_cbfn) | |
3611 | (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb); | |
3612 | } | |
3613 | ||
3614 | tx->bna->tx_mod.rid_mask &= ~(1 << tx->rid); | |
3615 | bna_tx_free(tx); | |
3616 | } | |
3617 | ||
3618 | void | |
3619 | bna_tx_enable(struct bna_tx *tx) | |
3620 | { | |
3621 | if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped) | |
3622 | return; | |
3623 | ||
3624 | tx->flags |= BNA_TX_F_ENABLED; | |
3625 | ||
3626 | if (tx->flags & BNA_TX_F_ENET_STARTED) | |
3627 | bfa_fsm_send_event(tx, TX_E_START); | |
3628 | } | |
3629 | ||
3630 | void | |
3631 | bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type, | |
3632 | void (*cbfn)(void *, struct bna_tx *)) | |
3633 | { | |
3634 | if (type == BNA_SOFT_CLEANUP) { | |
3635 | (*cbfn)(tx->bna->bnad, tx); | |
3636 | return; | |
3637 | } | |
3638 | ||
3639 | tx->stop_cbfn = cbfn; | |
3640 | tx->stop_cbarg = tx->bna->bnad; | |
3641 | ||
3642 | tx->flags &= ~BNA_TX_F_ENABLED; | |
3643 | ||
3644 | bfa_fsm_send_event(tx, TX_E_STOP); | |
3645 | } | |
3646 | ||
3647 | void | |
3648 | bna_tx_cleanup_complete(struct bna_tx *tx) | |
3649 | { | |
3650 | bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE); | |
3651 | } | |
3652 | ||
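| /* | |
| * Per-Tx stop completion: drop one reference on the module's | |
| * wait counter; the _all callback below runs once it drains. | |
| */ | |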
3653 | static void | |
3654 | bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx) | |
3655 | { | |
3656 | struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg; | |
3657 | ||
3658 | bfa_wc_down(&tx_mod->tx_stop_wc); | |
3659 | } | |
3660 | ||
3661 | static void | |
3662 | bna_tx_mod_cb_tx_stopped_all(void *arg) | |
3663 | { | |
3664 | struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg; | |
3665 | ||
3666 | if (tx_mod->stop_cbfn) | |
3667 | tx_mod->stop_cbfn(&tx_mod->bna->enet); | |
3668 | tx_mod->stop_cbfn = NULL; | |
3669 | } | |
3670 | ||
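| /* | |
| * Carve the Tx and TxQ arrays out of the module resource memory | |
| * and thread every element, in rid order, onto the free lists. | |
| */ | |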
3671 | void | |
3672 | bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna, | |
3673 | struct bna_res_info *res_info) | |
3674 | { | |
3675 | int i; | |
3676 | ||
3677 | tx_mod->bna = bna; | |
3678 | tx_mod->flags = 0; | |
3679 | ||
3680 | tx_mod->tx = (struct bna_tx *) | |
3681 | res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva; | |
3682 | tx_mod->txq = (struct bna_txq *) | |
3683 | res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva; | |
3684 | ||
3685 | INIT_LIST_HEAD(&tx_mod->tx_free_q); | |
3686 | INIT_LIST_HEAD(&tx_mod->tx_active_q); | |
3687 | ||
3688 | INIT_LIST_HEAD(&tx_mod->txq_free_q); | |
3689 | ||
3690 | for (i = 0; i < bna->ioceth.attr.num_txq; i++) { | |
3691 | tx_mod->tx[i].rid = i; | |
3692 | bfa_q_qe_init(&tx_mod->tx[i].qe); | |
3693 | list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q); | |
3694 | bfa_q_qe_init(&tx_mod->txq[i].qe); | |
3695 | list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q); | |
3696 | } | |
3697 | ||
3698 | tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL; | |
3699 | tx_mod->default_prio = 0; | |
3700 | tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED; | |
3701 | tx_mod->iscsi_prio = -1; | |
3702 | } | |
3703 | ||
3704 | void | |
3705 | bna_tx_mod_uninit(struct bna_tx_mod *tx_mod) | |
3706 | { | |
3718 | tx_mod->bna = NULL; | |
3719 | } | |
3720 | ||
3721 | void | |
3722 | bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type) | |
3723 | { | |
3724 | struct bna_tx *tx; | |
3725 | struct list_head *qe; | |
3726 | ||
3727 | tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED; | |
3728 | if (type == BNA_TX_T_LOOPBACK) | |
3729 | tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK; | |
3730 | ||
3731 | list_for_each(qe, &tx_mod->tx_active_q) { | |
3732 | tx = (struct bna_tx *)qe; | |
3733 | if (tx->type == type) | |
3734 | bna_tx_start(tx); | |
3735 | } | |
3736 | } | |
3737 | ||
3738 | void | |
3739 | bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type) | |
3740 | { | |
3741 | struct bna_tx *tx; | |
3742 | struct list_head *qe; | |
3743 | ||
3744 | tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED; | |
3745 | tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK; | |
3746 | ||
3747 | tx_mod->stop_cbfn = bna_enet_cb_tx_stopped; | |
3748 | ||
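| /* | |
| * Take one wait-counter reference per Tx being stopped; each | |
| * stop completion drops one via bna_tx_mod_cb_tx_stopped(), and | |
| * bna_tx_mod_cb_tx_stopped_all() fires once the count drains. | |
| */ | |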
3749 | bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod); | |
3750 | ||
3751 | list_for_each(qe, &tx_mod->tx_active_q) { | |
3752 | tx = (struct bna_tx *)qe; | |
3753 | if (tx->type == type) { | |
3754 | bfa_wc_up(&tx_mod->tx_stop_wc); | |
3755 | bna_tx_stop(tx); | |
3756 | } | |
3757 | } | |
3758 | ||
3759 | bfa_wc_wait(&tx_mod->tx_stop_wc); | |
3760 | } | |
3761 | ||
3762 | void | |
3763 | bna_tx_mod_fail(struct bna_tx_mod *tx_mod) | |
3764 | { | |
3765 | struct bna_tx *tx; | |
3766 | struct list_head *qe; | |
3767 | ||
3768 | tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED; | |
3769 | tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK; | |
3770 | ||
3771 | list_for_each(qe, &tx_mod->tx_active_q) { | |
3772 | tx = (struct bna_tx *)qe; | |
3773 | bna_tx_fail(tx); | |
3774 | } | |
3775 | } | |
3776 | ||
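| /* Propagate a new interrupt coalescing timeout to every TxQ's IB */ | |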
3777 | void | |
3778 | bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo) | |
3779 | { | |
3780 | struct bna_txq *txq; | |
3781 | struct list_head *qe; | |
3782 | ||
3783 | list_for_each(qe, &tx->txq_q) { | |
3784 | txq = (struct bna_txq *)qe; | |
3785 | bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo); | |
3786 | } | |
3787 | } |