/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#include "bna.h"
#include "bfa_cs.h"
#include "bfi.h"

/**
 * IB
 */
#define bna_ib_find_free_ibidx(_mask, _pos)\
do {\
	(_pos) = 0;\
	while (((_pos) < (BFI_IBIDX_MAX_SEGSIZE)) &&\
		((1 << (_pos)) & (_mask)))\
		(_pos)++;\
} while (0)

#define bna_ib_count_ibidx(_mask, _count)\
do {\
	int pos = 0;\
	(_count) = 0;\
	while (pos < (BFI_IBIDX_MAX_SEGSIZE)) {\
		if ((1 << pos) & (_mask))\
			(_count) = pos + 1;\
		pos++;\
	} \
} while (0)

#define bna_ib_select_segpool(_count, _q_idx)\
do {\
	int i;\
	(_q_idx) = -1;\
	for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {\
		if ((_count <= ibidx_pool[i].pool_entry_size)) {\
			(_q_idx) = i;\
			break;\
		} \
	} \
} while (0)

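/*
 * Worked example for the three helper macros above (illustrative only;
 * assumes BFI_IBIDX_MAX_SEGSIZE == 16, which may differ): with
 * _mask == 0x0b (indexes 0, 1 and 3 in use),
 * bna_ib_find_free_ibidx() yields _pos == 2 (first clear bit);
 * bna_ib_count_ibidx() yields _count == 4 (highest set bit + 1, i.e. the
 * index span the IB must cover, holes included); and
 * bna_ib_select_segpool() picks the first pool whose pool_entry_size
 * is >= 4, leaving _q_idx == -1 if none fits.
 */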
struct bna_ibidx_pool {
	int	pool_size;
	int	pool_entry_size;
};
init_ibidx_pool(ibidx_pool);
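/*
 * Note: init_ibidx_pool() is expected to be a macro (from bna.h) that
 * expands to the static definition of the ibidx_pool[] table of
 * BFI_IBIDX_TOTAL_POOLS entries referenced by the helpers above.
 */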

static struct bna_intr *
bna_intr_get(struct bna_ib_mod *ib_mod, enum bna_intr_type intr_type,
		int vector)
{
	struct bna_intr *intr;
	struct list_head *qe;

	list_for_each(qe, &ib_mod->intr_active_q) {
		intr = (struct bna_intr *)qe;

		if ((intr->intr_type == intr_type) &&
			(intr->vector == vector)) {
			intr->ref_count++;
			return intr;
		}
	}

	if (list_empty(&ib_mod->intr_free_q))
		return NULL;

	bfa_q_deq(&ib_mod->intr_free_q, &intr);
	bfa_q_qe_init(&intr->qe);

	intr->ref_count = 1;
	intr->intr_type = intr_type;
	intr->vector = vector;

	list_add_tail(&intr->qe, &ib_mod->intr_active_q);

	return intr;
}

static void
bna_intr_put(struct bna_ib_mod *ib_mod,
		struct bna_intr *intr)
{
	intr->ref_count--;

	if (intr->ref_count == 0) {
		intr->ib = NULL;
		list_del(&intr->qe);
		bfa_q_qe_init(&intr->qe);
		list_add_tail(&intr->qe, &ib_mod->intr_free_q);
	}
}

void
bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
		struct bna_res_info *res_info)
{
	int i;
	int j;
	int count;
	u8 offset;
	struct bna_doorbell_qset *qset;
	unsigned long off;

	ib_mod->bna = bna;

	ib_mod->ib = (struct bna_ib *)
		res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mdl[0].kva;
	ib_mod->intr = (struct bna_intr *)
		res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mdl[0].kva;
	ib_mod->idx_seg = (struct bna_ibidx_seg *)
		res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&ib_mod->ib_free_q);
	INIT_LIST_HEAD(&ib_mod->intr_free_q);
	INIT_LIST_HEAD(&ib_mod->intr_active_q);

	for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++)
		INIT_LIST_HEAD(&ib_mod->ibidx_seg_pool[i]);

	for (i = 0; i < BFI_MAX_IB; i++) {
		ib_mod->ib[i].ib_id = i;

		ib_mod->ib[i].ib_seg_host_addr_kva =
		res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
		ib_mod->ib[i].ib_seg_host_addr.lsb =
		res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
		ib_mod->ib[i].ib_seg_host_addr.msb =
		res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;

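		/*
		 * Offset-of idiom: index a NULL bna_doorbell_qset pointer
		 * to compute the byte offset of IB i's doorbell inside the
		 * doorbell page. The layout appears to pack two IBs per
		 * qset entry (i >> 1 selects the entry; (i & 0x1) *
		 * (0x20 >> 2) selects the u32 slot of the second IB), and
		 * the doorbell base address is then added.
		 */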
		qset = (struct bna_doorbell_qset *)0;
		off = (unsigned long)(&qset[i >> 1].ib0[(i & 0x1)
					* (0x20 >> 2)]);
		ib_mod->ib[i].door_bell.doorbell_addr = off +
			BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);

		bfa_q_qe_init(&ib_mod->ib[i].qe);
		list_add_tail(&ib_mod->ib[i].qe, &ib_mod->ib_free_q);

		bfa_q_qe_init(&ib_mod->intr[i].qe);
		list_add_tail(&ib_mod->intr[i].qe, &ib_mod->intr_free_q);
	}

	count = 0;
	offset = 0;
	for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
		for (j = 0; j < ibidx_pool[i].pool_size; j++) {
			bfa_q_qe_init(&ib_mod->idx_seg[count]);
			ib_mod->idx_seg[count].ib_seg_size =
					ibidx_pool[i].pool_entry_size;
			ib_mod->idx_seg[count].ib_idx_tbl_offset = offset;
			list_add_tail(&ib_mod->idx_seg[count].qe,
				&ib_mod->ibidx_seg_pool[i]);
			count++;
			offset += ibidx_pool[i].pool_entry_size;
		}
	}
}

void
bna_ib_mod_uninit(struct bna_ib_mod *ib_mod)
{
	int i;
	int j;
	struct list_head *qe;

	/* Walk the free queues; the counts are discarded (sanity only) */
	i = 0;
	list_for_each(qe, &ib_mod->ib_free_q)
		i++;

	i = 0;
	list_for_each(qe, &ib_mod->intr_free_q)
		i++;

	for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
		j = 0;
		list_for_each(qe, &ib_mod->ibidx_seg_pool[i])
			j++;
	}

	ib_mod->bna = NULL;
}
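/*
 * IB sharing note: callers that pass the same interrupt type and vector
 * to bna_ib_get() below receive the already-active IB with its ref_count
 * bumped, up to BFI_IBIDX_MAX_SEGSIZE users; a new IB is taken from
 * ib_free_q only for a new (type, vector) pair.
 */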

static struct bna_ib *
bna_ib_get(struct bna_ib_mod *ib_mod,
		enum bna_intr_type intr_type,
		int vector)
{
	struct bna_ib *ib;
	struct bna_intr *intr;

	if (intr_type == BNA_INTR_T_INTX)
		vector = (1 << vector);

	intr = bna_intr_get(ib_mod, intr_type, vector);
	if (intr == NULL)
		return NULL;

	if (intr->ib) {
		if (intr->ib->ref_count == BFI_IBIDX_MAX_SEGSIZE) {
			bna_intr_put(ib_mod, intr);
			return NULL;
		}
		intr->ib->ref_count++;
		return intr->ib;
	}

	if (list_empty(&ib_mod->ib_free_q)) {
		bna_intr_put(ib_mod, intr);
		return NULL;
	}

	bfa_q_deq(&ib_mod->ib_free_q, &ib);
	bfa_q_qe_init(&ib->qe);

	ib->ref_count = 1;
	ib->start_count = 0;
	ib->idx_mask = 0;

	ib->intr = intr;
	ib->idx_seg = NULL;
	intr->ib = ib;

	ib->bna = ib_mod->bna;

	return ib;
}

static void
bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib)
{
	bna_intr_put(ib_mod, ib->intr);

	ib->ref_count--;

	if (ib->ref_count == 0) {
		ib->intr = NULL;
		ib->bna = NULL;
		list_add_tail(&ib->qe, &ib_mod->ib_free_q);
	}
}

/* Returns index offset - starting from 0 */
static int
bna_ib_reserve_idx(struct bna_ib *ib)
{
	struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
	struct bna_ibidx_seg *idx_seg;
	int idx;
	int num_idx;
	int q_idx;

	/* Find the first free index position */
	bna_ib_find_free_ibidx(ib->idx_mask, idx);
	if (idx == BFI_IBIDX_MAX_SEGSIZE)
		return -1;

	/*
	 * Calculate the total number of indexes held by this IB,
	 * including the index newly reserved above.
	 */
	bna_ib_count_ibidx((ib->idx_mask | (1 << idx)), num_idx);

	/* See if there is a free space in the index segment held by this IB */
	if (ib->idx_seg && (num_idx <= ib->idx_seg->ib_seg_size)) {
		ib->idx_mask |= (1 << idx);
		return idx;
	}

	if (ib->start_count)
		return -1;

	/* Allocate a new segment */
	bna_ib_select_segpool(num_idx, q_idx);
	while (1) {
		if (q_idx == BFI_IBIDX_TOTAL_POOLS)
			return -1;
		if (!list_empty(&ib_mod->ibidx_seg_pool[q_idx]))
			break;
		q_idx++;
	}
	bfa_q_deq(&ib_mod->ibidx_seg_pool[q_idx], &idx_seg);
	bfa_q_qe_init(&idx_seg->qe);

	/* Free the old segment */
	if (ib->idx_seg) {
		bna_ib_select_segpool(ib->idx_seg->ib_seg_size, q_idx);
		list_add_tail(&ib->idx_seg->qe, &ib_mod->ibidx_seg_pool[q_idx]);
	}

	ib->idx_seg = idx_seg;

	ib->idx_mask |= (1 << idx);

	return idx;
}

static void
bna_ib_release_idx(struct bna_ib *ib, int idx)
{
	struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
	struct bna_ibidx_seg *idx_seg;
	int num_idx;
	int cur_q_idx;
	int new_q_idx;

	ib->idx_mask &= ~(1 << idx);

	if (ib->start_count)
		return;

	bna_ib_count_ibidx(ib->idx_mask, num_idx);

	/*
	 * Free the segment, if there are no more indexes in the segment
	 * held by this IB
	 */
	if (!num_idx) {
		bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
		list_add_tail(&ib->idx_seg->qe,
			&ib_mod->ibidx_seg_pool[cur_q_idx]);
		ib->idx_seg = NULL;
		return;
	}

	/* See if we can move to a smaller segment */
	bna_ib_select_segpool(num_idx, new_q_idx);
	bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
	while (new_q_idx < cur_q_idx) {
		if (!list_empty(&ib_mod->ibidx_seg_pool[new_q_idx]))
			break;
		new_q_idx++;
	}
	if (new_q_idx < cur_q_idx) {
		/* Select the new smaller segment */
		bfa_q_deq(&ib_mod->ibidx_seg_pool[new_q_idx], &idx_seg);
		bfa_q_qe_init(&idx_seg->qe);
		/* Free the old segment */
		list_add_tail(&ib->idx_seg->qe,
			&ib_mod->ibidx_seg_pool[cur_q_idx]);
		ib->idx_seg = idx_seg;
	}
}

static int
bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config)
{
	if (ib->start_count)
		return -1;

	ib->ib_config.coalescing_timeo = ib_config->coalescing_timeo;
	ib->ib_config.interpkt_timeo = ib_config->interpkt_timeo;
	ib->ib_config.interpkt_count = ib_config->interpkt_count;
	ib->ib_config.ctrl_flags = ib_config->ctrl_flags;

	ib->ib_config.ctrl_flags |= BFI_IB_CF_MASTER_ENABLE;
	if (ib->intr->intr_type == BNA_INTR_T_MSIX)
		ib->ib_config.ctrl_flags |= BFI_IB_CF_MSIX_MODE;

	return 0;
}

static void
bna_ib_start(struct bna_ib *ib)
{
	struct bna_ib_blk_mem ib_cfg;
	struct bna_ib_blk_mem *ib_mem;
	u32 pg_num;
	u32 intx_mask;
	int i;
	void __iomem *base_addr;
	unsigned long off;

	ib->start_count++;

	if (ib->start_count > 1)
		return;

	ib_cfg.host_addr_lo = (u32)(ib->ib_seg_host_addr.lsb);
	ib_cfg.host_addr_hi = (u32)(ib->ib_seg_host_addr.msb);

	ib_cfg.clsc_n_ctrl_n_msix = (((u32)
			ib->ib_config.coalescing_timeo << 16) |
		((u32)ib->ib_config.ctrl_flags << 8) |
		(ib->intr->vector));
	ib_cfg.ipkt_n_ent_n_idxof =
		((u32)
		 (ib->ib_config.interpkt_timeo & 0xf) << 16) |
		((u32)ib->idx_seg->ib_seg_size << 8) |
		(ib->idx_seg->ib_idx_tbl_offset);
	ib_cfg.ipkt_cnt_cfg_n_unacked = ((u32)
			ib->ib_config.interpkt_count << 24);

	pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
			HQM_IB_RAM_BASE_OFFSET);
	writel(pg_num, ib->bna->regs.page_addr);

	base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
			HQM_IB_RAM_BASE_OFFSET);

	ib_mem = (struct bna_ib_blk_mem *)0;
	off = (unsigned long)&ib_mem[ib->ib_id].host_addr_lo;
	writel(htonl(ib_cfg.host_addr_lo), base_addr + off);

	off = (unsigned long)&ib_mem[ib->ib_id].host_addr_hi;
	writel(htonl(ib_cfg.host_addr_hi), base_addr + off);

	off = (unsigned long)&ib_mem[ib->ib_id].clsc_n_ctrl_n_msix;
	writel(ib_cfg.clsc_n_ctrl_n_msix, base_addr + off);

	off = (unsigned long)&ib_mem[ib->ib_id].ipkt_n_ent_n_idxof;
	writel(ib_cfg.ipkt_n_ent_n_idxof, base_addr + off);

	off = (unsigned long)&ib_mem[ib->ib_id].ipkt_cnt_cfg_n_unacked;
	writel(ib_cfg.ipkt_cnt_cfg_n_unacked, base_addr + off);

	ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
		(u32)ib->ib_config.coalescing_timeo, 0);

	pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
			HQM_INDX_TBL_RAM_BASE_OFFSET);
	writel(pg_num, ib->bna->regs.page_addr);

	base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
			HQM_INDX_TBL_RAM_BASE_OFFSET);
	for (i = 0; i < ib->idx_seg->ib_seg_size; i++) {
		off = (unsigned long)
		((ib->idx_seg->ib_idx_tbl_offset + i) * BFI_IBIDX_SIZE);
		writel(0, base_addr + off);
	}

	if (ib->intr->intr_type == BNA_INTR_T_INTX) {
		bna_intx_disable(ib->bna, intx_mask);
		intx_mask &= ~(ib->intr->vector);
		bna_intx_enable(ib->bna, intx_mask);
	}
}

static void
bna_ib_stop(struct bna_ib *ib)
{
	u32 intx_mask;

	ib->start_count--;

	if (ib->start_count == 0) {
		writel(BNA_DOORBELL_IB_INT_DISABLE,
			ib->door_bell.doorbell_addr);
		if (ib->intr->intr_type == BNA_INTR_T_INTX) {
			bna_intx_disable(ib->bna, intx_mask);
			intx_mask |= (ib->intr->vector);
			bna_intx_enable(ib->bna, intx_mask);
		}
	}
}

static void
bna_ib_fail(struct bna_ib *ib)
{
	ib->start_count = 0;
}

/**
 * RXF
 */
static void rxf_enable(struct bna_rxf *rxf);
static void rxf_disable(struct bna_rxf *rxf);
static void __rxf_config_set(struct bna_rxf *rxf);
static void __rxf_rit_set(struct bna_rxf *rxf);
static void __bna_rxf_stat_clr(struct bna_rxf *rxf);
static int rxf_process_packet_filter(struct bna_rxf *rxf);
static int rxf_clear_packet_filter(struct bna_rxf *rxf);
static void rxf_reset_packet_filter(struct bna_rxf *rxf);
static void rxf_cb_enabled(void *arg, int status);
static void rxf_cb_disabled(void *arg, int status);
static void bna_rxf_cb_stats_cleared(void *arg, int status);
static void __rxf_enable(struct bna_rxf *rxf);
static void __rxf_disable(struct bna_rxf *rxf);

bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, start_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cam_fltr_mod_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cam_fltr_clr_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, stop_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, pause_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, resume_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, stat_clr_wait, struct bna_rxf,
			enum bna_rxf_event);

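/*
 * Each bfa_fsm_state_decl() above is assumed (per bfa_cs.h) to expand to
 * prototypes for a state-entry function and a state handler, e.g. for
 * "stopped": bna_rxf_sm_stopped_entry(struct bna_rxf *) and
 * bna_rxf_sm_stopped(struct bna_rxf *, enum bna_rxf_event). Both are
 * defined below and driven via bfa_fsm_set_state()/bfa_fsm_send_event().
 */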
static struct bfa_sm_table rxf_sm_table[] = {
	{BFA_SM(bna_rxf_sm_stopped), BNA_RXF_STOPPED},
	{BFA_SM(bna_rxf_sm_start_wait), BNA_RXF_START_WAIT},
	{BFA_SM(bna_rxf_sm_cam_fltr_mod_wait), BNA_RXF_CAM_FLTR_MOD_WAIT},
	{BFA_SM(bna_rxf_sm_started), BNA_RXF_STARTED},
	{BFA_SM(bna_rxf_sm_cam_fltr_clr_wait), BNA_RXF_CAM_FLTR_CLR_WAIT},
	{BFA_SM(bna_rxf_sm_stop_wait), BNA_RXF_STOP_WAIT},
	{BFA_SM(bna_rxf_sm_pause_wait), BNA_RXF_PAUSE_WAIT},
	{BFA_SM(bna_rxf_sm_resume_wait), BNA_RXF_RESUME_WAIT},
	{BFA_SM(bna_rxf_sm_stat_clr_wait), BNA_RXF_STAT_CLR_WAIT}
};

static void
bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
{
	call_rxf_stop_cbfn(rxf, BNA_CB_SUCCESS);
}

static void
bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_START:
		bfa_fsm_set_state(rxf, bna_rxf_sm_start_wait);
		break;

	case RXF_E_STOP:
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_FAIL:
		/* No-op */
		break;

	case RXF_E_CAM_FLTR_MOD:
		call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
		break;

	case RXF_E_STARTED:
	case RXF_E_STOPPED:
	case RXF_E_CAM_FLTR_RESP:
		/**
		 * These events are received due to flushing of mbox
		 * when device fails
		 */
		/* No-op */
		break;

	case RXF_E_PAUSE:
		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
		call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
		break;

	case RXF_E_RESUME:
		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
		call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_start_wait_entry(struct bna_rxf *rxf)
{
	__rxf_config_set(rxf);
	__rxf_rit_set(rxf);
	rxf_enable(rxf);
}

static void
bna_rxf_sm_start_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
		/**
		 * STOP is originated from bnad. When this happens,
		 * it cannot be waiting for a filter update
		 */
		call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
		break;

	case RXF_E_FAIL:
		call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
		call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CAM_FLTR_MOD:
		/* No-op */
		break;

	case RXF_E_STARTED:
		/**
		 * Force rxf_process_packet_filter() to go through the
		 * initial config
		 */
		if ((rxf->ucast_active_mac != NULL) &&
			(rxf->ucast_pending_set == 0))
			rxf->ucast_pending_set = 1;

		if (rxf->rss_status == BNA_STATUS_T_ENABLED)
			rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;

		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;

		bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
		break;

	case RXF_E_PAUSE:
	case RXF_E_RESUME:
		rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_cam_fltr_mod_wait_entry(struct bna_rxf *rxf)
{
	if (!rxf_process_packet_filter(rxf)) {
		/* No more pending CAM entries to update */
		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
	}
}

static void
bna_rxf_sm_cam_fltr_mod_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
		/**
		 * STOP is originated from bnad. When this happens,
		 * it cannot be waiting for a filter update
		 */
		call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
		bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
		break;

	case RXF_E_FAIL:
		rxf_reset_packet_filter(rxf);
		call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
		call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CAM_FLTR_MOD:
		/* No-op */
		break;

	case RXF_E_CAM_FLTR_RESP:
		if (!rxf_process_packet_filter(rxf)) {
			/* No more pending CAM entries to update */
			call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
			bfa_fsm_set_state(rxf, bna_rxf_sm_started);
		}
		break;

	case RXF_E_PAUSE:
	case RXF_E_RESUME:
		rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_started_entry(struct bna_rxf *rxf)
{
	call_rxf_start_cbfn(rxf, BNA_CB_SUCCESS);

	if (rxf->rxf_flags & BNA_RXF_FL_OPERSTATE_CHANGED) {
		if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
			bfa_fsm_send_event(rxf, RXF_E_PAUSE);
		else
			bfa_fsm_send_event(rxf, RXF_E_RESUME);
	}
}

static void
bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
		bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
		/* Hack to get the FSM to start clearing CAM entries */
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
		break;

	case RXF_E_FAIL:
		rxf_reset_packet_filter(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CAM_FLTR_MOD:
		bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
		break;

	case RXF_E_PAUSE:
		bfa_fsm_set_state(rxf, bna_rxf_sm_pause_wait);
		break;

	case RXF_E_RESUME:
		bfa_fsm_set_state(rxf, bna_rxf_sm_resume_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_cam_fltr_clr_wait_entry(struct bna_rxf *rxf)
{
	/**
	 * Note: Do not add rxf_clear_packet_filter here.
	 * It will overstep mbox when this transition happens:
	 * cam_fltr_mod_wait -> cam_fltr_clr_wait on RXF_E_STOP event
	 */
}

static void
bna_rxf_sm_cam_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
		/**
		 * FSM was in the process of stopping, initiated by
		 * bnad. When this happens, no one can be waiting for
		 * start or filter update
		 */
		rxf_reset_packet_filter(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CAM_FLTR_RESP:
		if (!rxf_clear_packet_filter(rxf)) {
			/* No more pending CAM entries to clear */
			bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
			rxf_disable(rxf);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_stop_wait_entry(struct bna_rxf *rxf)
{
	/**
	 * NOTE: Do not add rxf_disable here.
	 * It will overstep mbox when this transition happens:
	 * start_wait -> stop_wait on RXF_E_STOP event
	 */
}

static void
bna_rxf_sm_stop_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
		/**
		 * FSM was in the process of stopping, initiated by
		 * bnad. When this happens, no one can be waiting for
		 * start or filter update
		 */
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_STARTED:
		/**
		 * This event is received due to abrupt transition from
		 * bna_rxf_sm_start_wait state on receiving
		 * RXF_E_STOP event
		 */
		rxf_disable(rxf);
		break;

	case RXF_E_STOPPED:
		/**
		 * FSM was in the process of stopping, initiated by
		 * bnad. When this happens, no one can be waiting for
		 * start or filter update
		 */
		bfa_fsm_set_state(rxf, bna_rxf_sm_stat_clr_wait);
		break;

	case RXF_E_PAUSE:
		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
		break;

	case RXF_E_RESUME:
		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_pause_wait_entry(struct bna_rxf *rxf)
{
	rxf->rxf_flags &=
		~(BNA_RXF_FL_OPERSTATE_CHANGED | BNA_RXF_FL_RXF_ENABLED);
	__rxf_disable(rxf);
}

static void
bna_rxf_sm_pause_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
		/**
		 * FSM was in the process of disabling rxf, initiated by
		 * bnad.
		 */
		call_rxf_pause_cbfn(rxf, BNA_CB_FAIL);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_STOPPED:
		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
		call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
		break;

	/*
	 * Since PAUSE/RESUME can only be sent by bnad, we don't expect
	 * any other event during these states
	 */
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_resume_wait_entry(struct bna_rxf *rxf)
{
	rxf->rxf_flags &= ~(BNA_RXF_FL_OPERSTATE_CHANGED);
	rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
	__rxf_enable(rxf);
}

static void
bna_rxf_sm_resume_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
		/**
		 * FSM was in the process of disabling rxf, initiated by
		 * bnad.
		 */
		call_rxf_resume_cbfn(rxf, BNA_CB_FAIL);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_STARTED:
		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
		call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
		break;

	/*
	 * Since PAUSE/RESUME can only be sent by bnad, we don't expect
	 * any other event during these states
	 */
	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_stat_clr_wait_entry(struct bna_rxf *rxf)
{
	__bna_rxf_stat_clr(rxf);
}

static void
bna_rxf_sm_stat_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
	case RXF_E_STAT_CLEARED:
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
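
/*
 * RxF state machine overview (derived from the handlers above; normal,
 * failure-free flow only):
 *
 *   stopped --RXF_E_START--> start_wait --RXF_E_STARTED-->
 *   cam_fltr_mod_wait --(filter config done)--> started;
 *   started --RXF_E_STOP--> cam_fltr_clr_wait --(CAM cleared)-->
 *   stop_wait --RXF_E_STOPPED--> stat_clr_wait --RXF_E_STAT_CLEARED-->
 *   stopped.
 *
 * RXF_E_FAIL from any state drops back to stopped.
 */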

static void
__rxf_enable(struct bna_rxf *rxf)
{
	struct bfi_ll_rxf_multi_req ll_req;
	u32 bm[2] = {0, 0};

	if (rxf->rxf_id < 32)
		bm[0] = 1 << rxf->rxf_id;
	else
		bm[1] = 1 << (rxf->rxf_id - 32);

	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
	ll_req.rxf_id_mask[0] = htonl(bm[0]);
	ll_req.rxf_id_mask[1] = htonl(bm[1]);
	ll_req.enable = 1;

	bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
			rxf_cb_enabled, rxf);

	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
}
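
/*
 * The rxf_id_mask pair used above (and in __rxf_disable() and
 * __bna_rxf_stat_clr() below) encodes up to 64 RX functions as two 32-bit
 * words: function i sets bit (i % 32) of word (i / 32), so, for example,
 * rxf_id 35 yields bm[1] = 1 << 3.
 */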

static void
__rxf_disable(struct bna_rxf *rxf)
{
	struct bfi_ll_rxf_multi_req ll_req;
	u32 bm[2] = {0, 0};

	if (rxf->rxf_id < 32)
		bm[0] = 1 << rxf->rxf_id;
	else
		bm[1] = 1 << (rxf->rxf_id - 32);

	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
	ll_req.rxf_id_mask[0] = htonl(bm[0]);
	ll_req.rxf_id_mask[1] = htonl(bm[1]);
	ll_req.enable = 0;

	bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
			rxf_cb_disabled, rxf);

	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
}

static void
__rxf_config_set(struct bna_rxf *rxf)
{
	u32 i;
	struct bna_rss_mem *rss_mem;
	struct bna_rx_fndb_ram *rx_fndb_ram;
	struct bna *bna = rxf->rx->bna;
	void __iomem *base_addr;
	unsigned long off;

	base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
			RSS_TABLE_BASE_OFFSET);

	rss_mem = (struct bna_rss_mem *)0;

	/* Configure RSS if required */
	if (rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE) {
		/* configure RSS Table */
		writel(BNA_GET_PAGE_NUM(RAD0_MEM_BLK_BASE_PG_NUM +
			bna->port_num, RSS_TABLE_BASE_OFFSET),
			bna->regs.page_addr);

		/* temporarily disable RSS, while hash value is written */
		off = (unsigned long)&rss_mem[0].type_n_hash;
		writel(0, base_addr + off);

		/* write the hash key; the key words go in reverse order */
		for (i = 0; i < BFI_RSS_HASH_KEY_LEN; i++) {
			off = (unsigned long)
			&rss_mem[0].hash_key[(BFI_RSS_HASH_KEY_LEN - 1) - i];
			writel(htonl(rxf->rss_cfg.toeplitz_hash_key[i]),
				base_addr + off);
		}

		off = (unsigned long)&rss_mem[0].type_n_hash;
		writel(rxf->rss_cfg.hash_type | rxf->rss_cfg.hash_mask,
			base_addr + off);
	}

	/* Configure RxF */
	writel(BNA_GET_PAGE_NUM(
		LUT0_MEM_BLK_BASE_PG_NUM + (bna->port_num * 2),
		RX_FNDB_RAM_BASE_OFFSET),
		bna->regs.page_addr);

	base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
		RX_FNDB_RAM_BASE_OFFSET);

	rx_fndb_ram = (struct bna_rx_fndb_ram *)0;

	/* We always use RSS table 0 */
	off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rss_prop;
	writel(rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE,
		base_addr + off);

	/* small/large buffer enable/disable */
	off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].size_routing_props;
	writel((rxf->ctrl_flags & BNA_RXF_CF_SM_LG_RXQ) | 0x80,
		base_addr + off);

	/* RIT offset, HDS forced offset, multicast RxQ Id */
	off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rit_hds_mcastq;
	writel((rxf->rit_segment->rit_offset << 16) |
		(rxf->forced_offset << 8) |
		(rxf->hds_cfg.hdr_type & BNA_HDS_FORCED) | rxf->mcast_rxq_id,
		base_addr + off);

	/*
	 * default vlan tag, default function enable, strip vlan bytes,
	 * HDS type, header size
	 */
	off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].control_flags;
	writel(((u32)rxf->default_vlan_tag << 16) |
		(rxf->ctrl_flags &
			(BNA_RXF_CF_DEFAULT_VLAN |
			BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE |
			BNA_RXF_CF_VLAN_STRIP)) |
		(rxf->hds_cfg.hdr_type & ~BNA_HDS_FORCED) |
		rxf->hds_cfg.header_size,
		base_addr + off);
}

void
__rxf_vlan_filter_set(struct bna_rxf *rxf, enum bna_status status)
{
	struct bna *bna = rxf->rx->bna;
	int i;

	writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
			(bna->port_num * 2), VLAN_RAM_BASE_OFFSET),
			bna->regs.page_addr);

	if (status == BNA_STATUS_T_ENABLED) {
		/* enable VLAN filtering on this function */
		for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
			writel(rxf->vlan_filter_table[i],
					BNA_GET_VLAN_MEM_ENTRY_ADDR
					(bna->pcidev.pci_bar_kva, rxf->rxf_id,
						i * 32));
		}
	} else {
		/* disable VLAN filtering on this function */
		for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
			writel(0xffffffff,
					BNA_GET_VLAN_MEM_ENTRY_ADDR
					(bna->pcidev.pci_bar_kva, rxf->rxf_id,
						i * 32));
		}
	}
}

static void
__rxf_rit_set(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	struct bna_rit_mem *rit_mem;
	int i;
	void __iomem *base_addr;
	unsigned long off;

	base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
			FUNCTION_TO_RXQ_TRANSLATE);

	rit_mem = (struct bna_rit_mem *)0;

	writel(BNA_GET_PAGE_NUM(RXA0_MEM_BLK_BASE_PG_NUM + bna->port_num,
		FUNCTION_TO_RXQ_TRANSLATE),
		bna->regs.page_addr);

	for (i = 0; i < rxf->rit_segment->rit_size; i++) {
		off = (unsigned long)&rit_mem[i + rxf->rit_segment->rit_offset];
		writel(rxf->rit_segment->rit[i].large_rxq_id << 6 |
			rxf->rit_segment->rit[i].small_rxq_id,
			base_addr + off);
	}
}
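
/*
 * Each RIT entry written above packs two RxQ ids into one word: the
 * large-buffer RxQ id in bits 6 and up, and the small-buffer RxQ id in
 * bits 0-5, which implicitly limits small_rxq_id to 6 bits here.
 */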

static void
__bna_rxf_stat_clr(struct bna_rxf *rxf)
{
	struct bfi_ll_stats_req ll_req;
	u32 bm[2] = {0, 0};

	if (rxf->rxf_id < 32)
		bm[0] = 1 << rxf->rxf_id;
	else
		bm[1] = 1 << (rxf->rxf_id - 32);

	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
	ll_req.stats_mask = 0;
	ll_req.txf_id_mask[0] = 0;
	ll_req.txf_id_mask[1] = 0;

	ll_req.rxf_id_mask[0] = htonl(bm[0]);
	ll_req.rxf_id_mask[1] = htonl(bm[1]);

	bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
			bna_rxf_cb_stats_cleared, rxf);
	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
}

static void
rxf_enable(struct bna_rxf *rxf)
{
	if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
		bfa_fsm_send_event(rxf, RXF_E_STARTED);
	else {
		rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
		__rxf_enable(rxf);
	}
}

static void
rxf_cb_enabled(void *arg, int status)
{
	struct bna_rxf *rxf = (struct bna_rxf *)arg;

	bfa_q_qe_init(&rxf->mbox_qe.qe);
	bfa_fsm_send_event(rxf, RXF_E_STARTED);
}

static void
rxf_disable(struct bna_rxf *rxf)
{
	if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
		bfa_fsm_send_event(rxf, RXF_E_STOPPED);
	else {
		rxf->rxf_flags &= ~BNA_RXF_FL_RXF_ENABLED;
		__rxf_disable(rxf);
	}
}

static void
rxf_cb_disabled(void *arg, int status)
{
	struct bna_rxf *rxf = (struct bna_rxf *)arg;

	bfa_q_qe_init(&rxf->mbox_qe.qe);
	bfa_fsm_send_event(rxf, RXF_E_STOPPED);
}

void
rxf_cb_cam_fltr_mbox_cmd(void *arg, int status)
{
	struct bna_rxf *rxf = (struct bna_rxf *)arg;

	bfa_q_qe_init(&rxf->mbox_qe.qe);

	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
}

static void
bna_rxf_cb_stats_cleared(void *arg, int status)
{
	struct bna_rxf *rxf = (struct bna_rxf *)arg;

	bfa_q_qe_init(&rxf->mbox_qe.qe);
	bfa_fsm_send_event(rxf, RXF_E_STAT_CLEARED);
}

void
rxf_cam_mbox_cmd(struct bna_rxf *rxf, u8 cmd,
		const struct bna_mac *mac_addr)
{
	struct bfi_ll_mac_addr_req req;

	bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0);

	req.rxf_id = rxf->rxf_id;
	memcpy(&req.mac_addr, (void *)&mac_addr->addr, ETH_ALEN);

	bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req),
				rxf_cb_cam_fltr_mbox_cmd, rxf);

	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
}

static int
rxf_process_packet_filter_mcast(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;

	/* Add multicast entries */
	if (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_ADD_REQ, mac);
		list_add_tail(&mac->qe, &rxf->mcast_active_q);
		return 1;
	}

	/* Delete multicast entries previously added */
	if (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
		return 1;
	}

	return 0;
}

static int
rxf_process_packet_filter_vlan(struct bna_rxf *rxf)
{
	/* Apply the VLAN filter */
	if (rxf->rxf_flags & BNA_RXF_FL_VLAN_CONFIG_PENDING) {
		rxf->rxf_flags &= ~BNA_RXF_FL_VLAN_CONFIG_PENDING;
		if (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))
			__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
	}

	/* Apply RSS configuration */
	if (rxf->rxf_flags & BNA_RXF_FL_RSS_CONFIG_PENDING) {
		rxf->rxf_flags &= ~BNA_RXF_FL_RSS_CONFIG_PENDING;
		if (rxf->rss_status == BNA_STATUS_T_DISABLED) {
			/* RSS is being disabled */
			rxf->ctrl_flags &= ~BNA_RXF_CF_RSS_ENABLE;
			__rxf_rit_set(rxf);
			__rxf_config_set(rxf);
		} else {
			/* RSS is being enabled or reconfigured */
			rxf->ctrl_flags |= BNA_RXF_CF_RSS_ENABLE;
			__rxf_rit_set(rxf);
			__rxf_config_set(rxf);
		}
	}

	return 0;
}

/**
 * Processes pending ucast, mcast entry addition/deletion and issues mailbox
 * command. Also processes pending filter configuration - promiscuous mode,
 * default mode, allmulti mode - and issues the mailbox command or directly
 * applies it to h/w
 */
static int
rxf_process_packet_filter(struct bna_rxf *rxf)
{
	/* Set the default MAC first */
	if (rxf->ucast_pending_set > 0) {
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_SET_REQ,
			rxf->ucast_active_mac);
		rxf->ucast_pending_set--;
		return 1;
	}

	if (rxf_process_packet_filter_ucast(rxf))
		return 1;

	if (rxf_process_packet_filter_mcast(rxf))
		return 1;

	if (rxf_process_packet_filter_promisc(rxf))
		return 1;

	if (rxf_process_packet_filter_allmulti(rxf))
		return 1;

	if (rxf_process_packet_filter_vlan(rxf))
		return 1;

	return 0;
}
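
/*
 * Mailbox pacing: each rxf_process_xxx()/rxf_clear_xxx() helper returns 1
 * as soon as it has posted one mailbox command, so only one CAM/filter
 * command is outstanding at a time; the FSM re-enters these functions on
 * each RXF_E_CAM_FLTR_RESP until everything returns 0.
 */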

static int
rxf_clear_packet_filter_mcast(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;

	/* 3. delete pending mcast entries */
	if (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
		return 1;
	}

	/* 4. clear active mcast entries; move them to pending_add_q */
	if (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
		list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
		return 1;
	}

	return 0;
}

/**
 * In the rxf stop path, processes pending ucast/mcast delete queue and issues
 * the mailbox command. Moves the active ucast/mcast entries to pending add q,
 * so that they are added to CAM again in the rxf start path. Moves the current
 * filter settings - promiscuous, default, allmulti - to pending filter
 * configuration
 */
static int
rxf_clear_packet_filter(struct bna_rxf *rxf)
{
	if (rxf_clear_packet_filter_ucast(rxf))
		return 1;

	if (rxf_clear_packet_filter_mcast(rxf))
		return 1;

	/* 5. clear active default MAC in the CAM */
	if (rxf->ucast_pending_set > 0)
		rxf->ucast_pending_set = 0;

	if (rxf_clear_packet_filter_promisc(rxf))
		return 1;

	if (rxf_clear_packet_filter_allmulti(rxf))
		return 1;

	return 0;
}

static void
rxf_reset_packet_filter_mcast(struct bna_rxf *rxf)
{
	struct list_head *qe;
	struct bna_mac *mac;

	/* 3. Move active mcast entries to pending_add_q */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		bfa_q_qe_init(qe);
		list_add_tail(qe, &rxf->mcast_pending_add_q);
	}

	/* 4. Throw away delete pending mcast entries */
	while (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
	}
}

/**
 * In the rxf fail path, throws away the ucast/mcast entries pending for
 * deletion, moves all active ucast/mcast entries to pending queue so that
 * they are added back to CAM in the rxf start path. Also moves the current
 * filter configuration to pending filter configuration.
 */
static void
rxf_reset_packet_filter(struct bna_rxf *rxf)
{
	rxf_reset_packet_filter_ucast(rxf);

	rxf_reset_packet_filter_mcast(rxf);

	/* 5. Turn off ucast set flag */
	rxf->ucast_pending_set = 0;

	rxf_reset_packet_filter_promisc(rxf);

	rxf_reset_packet_filter_allmulti(rxf);
}

static void
bna_rxf_init(struct bna_rxf *rxf,
		struct bna_rx *rx,
		struct bna_rx_config *q_config)
{
	struct list_head *qe;
	struct bna_rxp *rxp;

	/* rxf_id is initialized during rx_mod init */
	rxf->rx = rx;

	INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
	INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
	rxf->ucast_pending_set = 0;
	INIT_LIST_HEAD(&rxf->ucast_active_q);
	rxf->ucast_active_mac = NULL;

	INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
	INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
	INIT_LIST_HEAD(&rxf->mcast_active_q);

	bfa_q_qe_init(&rxf->mbox_qe.qe);

	if (q_config->vlan_strip_status == BNA_STATUS_T_ENABLED)
		rxf->ctrl_flags |= BNA_RXF_CF_VLAN_STRIP;

	rxf->rxf_oper_state = (q_config->paused) ?
		BNA_RXF_OPER_STATE_PAUSED : BNA_RXF_OPER_STATE_RUNNING;

	bna_rxf_adv_init(rxf, rx, q_config);

	rxf->rit_segment = bna_rit_mod_seg_get(&rxf->rx->bna->rit_mod,
					q_config->num_paths);

	/* Pick the mcast RxQ id from the first RxP in the list */
	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		if (q_config->rxp_type == BNA_RXP_SINGLE)
			rxf->mcast_rxq_id = rxp->rxq.single.only->rxq_id;
		else
			rxf->mcast_rxq_id = rxp->rxq.slr.large->rxq_id;
		break;
	}

	rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
	memset(rxf->vlan_filter_table, 0,
			(sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32)));

	/* Set up VLAN 0 for pure priority tagged packets */
	rxf->vlan_filter_table[0] |= 1;

	bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
}

static void
bna_rxf_uninit(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	struct bna_mac *mac;

	bna_rit_mod_seg_put(&rxf->rx->bna->rit_mod, rxf->rit_segment);
	rxf->rit_segment = NULL;

	rxf->ucast_pending_set = 0;

	while (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
		bfa_q_qe_init(&mac->qe);
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
	}

	if (rxf->ucast_active_mac) {
		bfa_q_qe_init(&rxf->ucast_active_mac->qe);
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
			rxf->ucast_active_mac);
		rxf->ucast_active_mac = NULL;
	}

	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
		bfa_q_qe_init(&mac->qe);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
	}

	/* Turn off pending promisc mode */
	if (is_promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* system promisc state should be pending */
		BUG_ON(!(bna->rxf_promisc_id == rxf->rxf_id));
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		bna->rxf_promisc_id = BFI_MAX_RXF;
	}
	/* Promisc mode should not be active */
	BUG_ON(rxf->rxmode_active & BNA_RXMODE_PROMISC);

	/* Turn off pending all-multi mode */
	if (is_allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
	}
	/* Allmulti mode should not be active */
	BUG_ON(rxf->rxmode_active & BNA_RXMODE_ALLMULTI);

	rxf->rx = NULL;
}

static void
bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status status)
{
	bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
	if (rx->rxf.rxf_id < 32)
		rx->bna->rx_mod.rxf_bmap[0] |= ((u32)1 << rx->rxf.rxf_id);
	else
		rx->bna->rx_mod.rxf_bmap[1] |= ((u32)
				1 << (rx->rxf.rxf_id - 32));
}

static void
bna_rxf_start(struct bna_rxf *rxf)
{
	rxf->start_cbfn = bna_rx_cb_rxf_started;
	rxf->start_cbarg = rxf->rx;
	rxf->rxf_flags &= ~BNA_RXF_FL_FAILED;
	bfa_fsm_send_event(rxf, RXF_E_START);
}

static void
bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status status)
{
	bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
	if (rx->rxf.rxf_id < 32)
		rx->bna->rx_mod.rxf_bmap[0] &= ~((u32)1 << rx->rxf.rxf_id);
	else
		rx->bna->rx_mod.rxf_bmap[1] &=
				~((u32)1 << (rx->rxf.rxf_id - 32));
}

static void
bna_rxf_stop(struct bna_rxf *rxf)
{
	rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
	rxf->stop_cbarg = rxf->rx;
	bfa_fsm_send_event(rxf, RXF_E_STOP);
}

static void
bna_rxf_fail(struct bna_rxf *rxf)
{
	rxf->rxf_flags |= BNA_RXF_FL_FAILED;
	bfa_fsm_send_event(rxf, RXF_E_FAIL);
}

int
bna_rxf_state_get(struct bna_rxf *rxf)
{
	return bfa_sm_to_state(rxf_sm_table, rxf->fsm);
}

enum bna_cb_status
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
		 void (*cbfn)(struct bnad *, struct bna_rx *,
			      enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->ucast_active_mac == NULL) {
		rxf->ucast_active_mac =
				bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
		if (rxf->ucast_active_mac == NULL)
			return BNA_CB_UCAST_CAM_FULL;
		bfa_q_qe_init(&rxf->ucast_active_mac->qe);
	}

	memcpy(rxf->ucast_active_mac->addr, ucmac, ETH_ALEN);
	rxf->ucast_pending_set++;
	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);

	return BNA_CB_SUCCESS;
}

enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
		 void (*cbfn)(struct bnad *, struct bna_rx *,
			      enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head *qe;
	struct bna_mac *mac;

	/* Check if already added */
	list_for_each(qe, &rxf->mcast_active_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
			if (cbfn)
				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
			return BNA_CB_SUCCESS;
		}
	}

	/* Check if pending addition */
	list_for_each(qe, &rxf->mcast_pending_add_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
			if (cbfn)
				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
			return BNA_CB_SUCCESS;
		}
	}

	mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
	if (mac == NULL)
		return BNA_CB_MCAST_LIST_FULL;
	bfa_q_qe_init(&mac->qe);
	memcpy(mac->addr, addr, ETH_ALEN);
	list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);

	return BNA_CB_SUCCESS;
}
1615 | ||
8b230ed8 RM |
enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
		     void (*cbfn)(struct bnad *, struct bna_rx *,
				  enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head list_head;
	struct list_head *qe;
	u8 *mcaddr;
	struct bna_mac *mac;
	struct bna_mac *mac1;
	int skip;
	int delete;
	int need_hw_config = 0;
	int i;

	/* Allocate nodes */
	INIT_LIST_HEAD(&list_head);
	for (i = 0, mcaddr = mclist; i < count; i++) {
		mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
		if (mac == NULL)
			goto err_return;
		bfa_q_qe_init(&mac->qe);
		memcpy(mac->addr, mcaddr, ETH_ALEN);
		list_add_tail(&mac->qe, &list_head);

		mcaddr += ETH_ALEN;
	}

	/* Schedule for addition */
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);

		skip = 0;

		/* Skip if already added */
		list_for_each(qe, &rxf->mcast_active_q) {
			mac1 = (struct bna_mac *)qe;
			if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
				bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,
						mac);
				skip = 1;
				break;
			}
		}

		if (skip)
			continue;

		/* Skip if pending addition */
		list_for_each(qe, &rxf->mcast_pending_add_q) {
			mac1 = (struct bna_mac *)qe;
			if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
				bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,
						mac);
				skip = 1;
				break;
			}
		}

		if (skip)
			continue;

		need_hw_config = 1;
		list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
	}

	/**
	 * Delete the entries that are in the pending_add_q but not
	 * in the new list
	 */
	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
			if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {
				delete = 0;
				break;
			}
			mcaddr += ETH_ALEN;
		}
		if (delete)
			bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
		else
			list_add_tail(&mac->qe, &list_head);
	}
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
	}

	/**
	 * Schedule entries for deletion that are in the active_q but not
	 * in the new list
	 */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
			if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {
				delete = 0;
				break;
			}
			mcaddr += ETH_ALEN;
		}
		if (delete) {
			list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
			need_hw_config = 1;
		} else {
			list_add_tail(&mac->qe, &list_head);
		}
	}
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->mcast_active_q);
	}

	if (need_hw_config) {
		rxf->cam_fltr_cbfn = cbfn;
		rxf->cam_fltr_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
	} else if (cbfn)
		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);

	return BNA_CB_SUCCESS;

err_return:
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
	}

	return BNA_CB_MCAST_LIST_FULL;
}
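
/*
 * Note on the reconciliation above: the new list is first staged on a
 * local list_head, duplicates of already-active or already-pending
 * entries are returned to the MCAM pool, and pending_add_q/active_q are
 * then rebuilt so that only genuine additions and deletions reach the
 * hardware (hence the single RXF_E_CAM_FLTR_MOD event when
 * need_hw_config is set).
 */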

void
bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> 5);
	int bit = (1 << (vlan_id & 0x1F));

	rxf->vlan_filter_table[index] |= bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
	}
}

void
bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> 5);
	int bit = (1 << (vlan_id & 0x1F));

	rxf->vlan_filter_table[index] &= ~bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
	}
}
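
/*
 * vlan_filter_table is a bitmap of 32-bit words: VLAN id N lives in
 * word (N >> 5), bit (N & 0x1F). For example, vlan_id 100 maps to
 * word 3, bit 4 (mask 0x10); 4096 VLAN ids thus fit in 128 words.
 */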

/**
 * RX
 */
#define RXQ_RCB_INIT(q, rxp, qdepth, bna, _id, unmapq_mem) do {	\
	struct bna_doorbell_qset *_qset;				\
	unsigned long off;						\
	(q)->rcb->producer_index = (q)->rcb->consumer_index = 0;	\
	(q)->rcb->q_depth = (qdepth);					\
	(q)->rcb->unmap_q = unmapq_mem;					\
	(q)->rcb->rxq = (q);						\
	(q)->rcb->cq = &(rxp)->cq;					\
	(q)->rcb->bnad = (bna)->bnad;					\
	_qset = (struct bna_doorbell_qset *)0;				\
	off = (unsigned long)&_qset[(q)->rxq_id].rxq[0];		\
	(q)->rcb->q_dbell = off +					\
		BNA_GET_DOORBELL_BASE_ADDR((bna)->pcidev.pci_bar_kva);	\
	(q)->rcb->id = _id;						\
} while (0)
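
/*
 * The (struct bna_doorbell_qset *)0 cast above is the classic offsetof
 * idiom: indexing a NULL-based array yields the byte offset of entry
 * rxq_id within the doorbell region, which is then added to the
 * doorbell base address mapped from the PCI BAR.
 */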

#define	BNA_GET_RXQS(qcfg)	(((qcfg)->rxp_type == BNA_RXP_SINGLE) ?	\
	(qcfg)->num_paths : ((qcfg)->num_paths * 2))

#define	SIZE_TO_PAGES(size)	(((size) >> PAGE_SHIFT) + ((((size) &	\
	(PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
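
/*
 * SIZE_TO_PAGES rounds a byte count up to whole pages: the first term
 * counts full pages, the second contributes 1 iff a partial page is
 * left over. E.g. with 4 KiB pages, SIZE_TO_PAGES(10000) = 2 + 1 = 3.
 */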

#define call_rx_stop_callback(rx, status)				\
do {									\
	if ((rx)->stop_cbfn) {						\
		(*(rx)->stop_cbfn)((rx)->stop_cbarg, rx, (status));	\
		(rx)->stop_cbfn = NULL;					\
		(rx)->stop_cbarg = NULL;				\
	}								\
} while (0)

/*
 * Since rx_enable is a synchronous callback, there is no start_cbfn
 * required. Instead, we'll call bnad_rx_post(rxp) so that bnad can post
 * the buffers for each rxpath.
 */

#define call_rx_disable_cbfn(rx, status)				\
do {									\
	if ((rx)->disable_cbfn) {					\
		(*(rx)->disable_cbfn)((rx)->disable_cbarg, (status));	\
		(rx)->disable_cbfn = NULL;				\
		(rx)->disable_cbarg = NULL;				\
	}								\
} while (0)

#define rxqs_reqd(type, num_rxqs)					\
	(((type) == BNA_RXP_SINGLE) ? (num_rxqs) : ((num_rxqs) * 2))

#define rx_ib_fail(rx)							\
do {									\
	struct bna_rxp *rxp;						\
	struct list_head *qe;						\
	list_for_each(qe, &(rx)->rxp_q) {				\
		rxp = (struct bna_rxp *)qe;				\
		bna_ib_fail(rxp->cq.ib);				\
	}								\
} while (0)

static void __bna_multi_rxq_stop(struct bna_rxp *, u32 *);
static void __bna_rxq_start(struct bna_rxq *rxq);
static void __bna_cq_start(struct bna_cq *cq);
static void bna_rit_create(struct bna_rx *rx);
static void bna_rx_cb_multi_rxq_stopped(void *arg, int status);
static void bna_rx_cb_rxq_stopped_all(void *arg);

bfa_fsm_state_decl(bna_rx, stopped,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_start_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, started,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxq_stop_wait,
	struct bna_rx, enum bna_rx_event);

static const struct bfa_sm_table rx_sm_table[] = {
	{BFA_SM(bna_rx_sm_stopped), BNA_RX_STOPPED},
	{BFA_SM(bna_rx_sm_rxf_start_wait), BNA_RX_RXF_START_WAIT},
	{BFA_SM(bna_rx_sm_started), BNA_RX_STARTED},
	{BFA_SM(bna_rx_sm_rxf_stop_wait), BNA_RX_RXF_STOP_WAIT},
	{BFA_SM(bna_rx_sm_rxq_stop_wait), BNA_RX_RXQ_STOP_WAIT},
};
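
/*
 * rx_sm_table maps each state handler to its enum bna_rx_state value;
 * bfa_sm_to_state() walks it so that bna_rx_state_get() below can report
 * the current state without the FSM storing an explicit state number.
 */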

static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
{
	struct bna_rxp *rxp;
	struct list_head *qe_rxp;

	list_for_each(qe_rxp, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe_rxp;
		rx->rx_cleanup_cbfn(rx->bna->bnad, rxp->cq.ccb);
	}

	call_rx_stop_callback(rx, BNA_CB_SUCCESS);
}

static void bna_rx_sm_stopped(struct bna_rx *rx,
				enum bna_rx_event event)
{
	switch (event) {
	case RX_E_START:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
		break;
	case RX_E_STOP:
		call_rx_stop_callback(rx, BNA_CB_SUCCESS);
		break;
	case RX_E_FAIL:
		/* no-op */
		break;
	default:
		bfa_sm_fault(event);
		break;
	}
}

static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
{
	struct bna_rxp *rxp;
	struct list_head *qe_rxp;
	struct bna_rxq *q0 = NULL, *q1 = NULL;

	/* Setup the RIT */
	bna_rit_create(rx);

	list_for_each(qe_rxp, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe_rxp;
		bna_ib_start(rxp->cq.ib);
		GET_RXQS(rxp, q0, q1);
		q0->buffer_size = bna_port_mtu_get(&rx->bna->port);
		__bna_rxq_start(q0);
		rx->rx_post_cbfn(rx->bna->bnad, q0->rcb);
		if (q1) {
			__bna_rxq_start(q1);
			rx->rx_post_cbfn(rx->bna->bnad, q1->rcb);
		}
		__bna_cq_start(&rxp->cq);
	}

	bna_rxf_start(&rx->rxf);
}

static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
				enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
		break;
	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		rx_ib_fail(rx);
		bna_rxf_fail(&rx->rxf);
		break;
	case RX_E_RXF_STARTED:
		bfa_fsm_set_state(rx, bna_rx_sm_started);
		break;
	default:
		bfa_sm_fault(event);
		break;
	}
}

void
bna_rx_sm_started_entry(struct bna_rx *rx)
{
	struct bna_rxp *rxp;
	struct list_head *qe_rxp;

	/* Start IB */
	list_for_each(qe_rxp, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe_rxp;
		bna_ib_ack(&rxp->cq.ib->door_bell, 0);
	}

	bna_llport_rx_started(&rx->bna->port.llport);
}

void
bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
		bna_llport_rx_stopped(&rx->bna->port.llport);
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		rx_ib_fail(rx);
		bna_rxf_fail(&rx->rxf);
		break;
	case RX_E_STOP:
		bna_llport_rx_stopped(&rx->bna->port.llport);
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
		break;
	default:
		bfa_sm_fault(event);
		break;
	}
}

void
bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
{
	bna_rxf_stop(&rx->rxf);
}

void
bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_RXF_STOPPED:
		bfa_fsm_set_state(rx, bna_rx_sm_rxq_stop_wait);
		break;
	case RX_E_RXF_STARTED:
		/**
		 * RxF was in the process of starting up when
		 * RXF_E_STOP was issued. Ignore this event
		 */
		break;
	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		rx_ib_fail(rx);
		bna_rxf_fail(&rx->rxf);
		break;
	default:
		bfa_sm_fault(event);
		break;
	}
}

void
bna_rx_sm_rxq_stop_wait_entry(struct bna_rx *rx)
{
	struct bna_rxp *rxp = NULL;
	struct bna_rxq *q0 = NULL;
	struct bna_rxq *q1 = NULL;
	struct list_head *qe;
	u32 rxq_mask[2] = {0, 0};

	/* Only one call to multi-rxq-stop for all RXPs in this RX */
	bfa_wc_up(&rx->rxq_stop_wc);
	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		GET_RXQS(rxp, q0, q1);
		if (q0->rxq_id < 32)
			rxq_mask[0] |= ((u32)1 << q0->rxq_id);
		else
			rxq_mask[1] |= ((u32)1 << (q0->rxq_id - 32));
		if (q1) {
			if (q1->rxq_id < 32)
				rxq_mask[0] |= ((u32)1 << q1->rxq_id);
			else
				rxq_mask[1] |= ((u32)1 << (q1->rxq_id - 32));
		}
	}

	__bna_multi_rxq_stop(rxp, rxq_mask);
}
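
/*
 * rxq_mask[] is a 64-bit RXQ id bitmap carried as two u32 words: ids
 * 0-31 set bits in rxq_mask[0], ids 32-63 in rxq_mask[1]. For example,
 * rxq_id 40 sets bit 8 of rxq_mask[1].
 */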

void
bna_rx_sm_rxq_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	struct bna_rxp *rxp = NULL;
	struct list_head *qe;

	switch (event) {
	case RX_E_RXQ_STOPPED:
		list_for_each(qe, &rx->rxp_q) {
			rxp = (struct bna_rxp *)qe;
			bna_ib_stop(rxp->cq.ib);
		}
		/* Fall through */
	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;
	default:
		bfa_sm_fault(event);
		break;
	}
}

void
__bna_multi_rxq_stop(struct bna_rxp *rxp, u32 *rxq_id_mask)
{
	struct bfi_ll_q_stop_req ll_req;

	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RXQ_STOP_REQ, 0);
	ll_req.q_id_mask[0] = htonl(rxq_id_mask[0]);
	ll_req.q_id_mask[1] = htonl(rxq_id_mask[1]);
	bna_mbox_qe_fill(&rxp->mbox_qe, &ll_req, sizeof(ll_req),
			bna_rx_cb_multi_rxq_stopped, rxp);
	bna_mbox_send(rxp->rx->bna, &rxp->mbox_qe);
}

void
__bna_rxq_start(struct bna_rxq *rxq)
{
	struct bna_rxtx_q_mem *q_mem;
	struct bna_rxq_mem rxq_cfg, *rxq_mem;
	struct bna_dma_addr cur_q_addr;
	/* struct bna_doorbell_qset *qset; */
	struct bna_qpt *qpt;
	u32 pg_num;
	struct bna *bna = rxq->rx->bna;
	void __iomem *base_addr;
	unsigned long off;

	qpt = &rxq->qpt;
	cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr));

	rxq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb;
	rxq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb;
	rxq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
	rxq_cfg.cur_q_entry_hi = cur_q_addr.msb;

	rxq_cfg.pg_cnt_n_prd_ptr = ((u32)qpt->page_count << 16) | 0x0;
	rxq_cfg.entry_n_pg_size = ((u32)(BFI_RXQ_WI_SIZE >> 2) << 16) |
		(qpt->page_size >> 2);
	rxq_cfg.sg_n_cq_n_cns_ptr =
		((u32)(rxq->rxp->cq.cq_id & 0xff) << 16) | 0x0;
	rxq_cfg.buf_sz_n_q_state = ((u32)rxq->buffer_size << 16) |
		BNA_Q_IDLE_STATE;
	rxq_cfg.next_qid = 0x0 | (0x3 << 8);

	/* Write the page number register */
	pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + bna->port_num,
			HQM_RXTX_Q_RAM_BASE_OFFSET);
	writel(pg_num, bna->regs.page_addr);

	/* Write to h/w */
	base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
					HQM_RXTX_Q_RAM_BASE_OFFSET);

	q_mem = (struct bna_rxtx_q_mem *)0;
	rxq_mem = &q_mem[rxq->rxq_id].rxq;

	off = (unsigned long)&rxq_mem->pg_tbl_addr_lo;
	writel(htonl(rxq_cfg.pg_tbl_addr_lo), base_addr + off);

	off = (unsigned long)&rxq_mem->pg_tbl_addr_hi;
	writel(htonl(rxq_cfg.pg_tbl_addr_hi), base_addr + off);

	off = (unsigned long)&rxq_mem->cur_q_entry_lo;
	writel(htonl(rxq_cfg.cur_q_entry_lo), base_addr + off);

	off = (unsigned long)&rxq_mem->cur_q_entry_hi;
	writel(htonl(rxq_cfg.cur_q_entry_hi), base_addr + off);

	off = (unsigned long)&rxq_mem->pg_cnt_n_prd_ptr;
	writel(rxq_cfg.pg_cnt_n_prd_ptr, base_addr + off);

	off = (unsigned long)&rxq_mem->entry_n_pg_size;
	writel(rxq_cfg.entry_n_pg_size, base_addr + off);

	off = (unsigned long)&rxq_mem->sg_n_cq_n_cns_ptr;
	writel(rxq_cfg.sg_n_cq_n_cns_ptr, base_addr + off);

	off = (unsigned long)&rxq_mem->buf_sz_n_q_state;
	writel(rxq_cfg.buf_sz_n_q_state, base_addr + off);

	off = (unsigned long)&rxq_mem->next_qid;
	writel(rxq_cfg.next_qid, base_addr + off);

	rxq->rcb->producer_index = 0;
	rxq->rcb->consumer_index = 0;
}
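
/*
 * Queue RAM is reached through a paged window: the page number register
 * selects which block of HQM queue RAM the BAR window exposes, and the
 * NULL-based q_mem[] indexing (the same offsetof idiom as RXQ_RCB_INIT)
 * turns a struct field into a byte offset within that window.
 */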

void
__bna_cq_start(struct bna_cq *cq)
{
	struct bna_cq_mem cq_cfg, *cq_mem;
	const struct bna_qpt *qpt;
	struct bna_dma_addr cur_q_addr;
	u32 pg_num;
	struct bna *bna = cq->rx->bna;
	void __iomem *base_addr;
	unsigned long off;

	qpt = &cq->qpt;
	cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr));

	/*
	 * Fill out structure, to be subsequently written
	 * to hardware
	 */
	cq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb;
	cq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb;
	cq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
	cq_cfg.cur_q_entry_hi = cur_q_addr.msb;

	cq_cfg.pg_cnt_n_prd_ptr = (qpt->page_count << 16) | 0x0;
	cq_cfg.entry_n_pg_size =
		((u32)(BFI_CQ_WI_SIZE >> 2) << 16) | (qpt->page_size >> 2);
	cq_cfg.int_blk_n_cns_ptr = ((((u32)cq->ib_seg_offset) << 24) |
		((u32)(cq->ib->ib_id & 0xff) << 16) | 0x0);
	cq_cfg.q_state = BNA_Q_IDLE_STATE;

	/* Write the page number register */
	pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + bna->port_num,
			HQM_CQ_RAM_BASE_OFFSET);

	writel(pg_num, bna->regs.page_addr);

	/* H/W write */
	base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
					HQM_CQ_RAM_BASE_OFFSET);

	cq_mem = (struct bna_cq_mem *)0;

	off = (unsigned long)&cq_mem[cq->cq_id].pg_tbl_addr_lo;
	writel(htonl(cq_cfg.pg_tbl_addr_lo), base_addr + off);

	off = (unsigned long)&cq_mem[cq->cq_id].pg_tbl_addr_hi;
	writel(htonl(cq_cfg.pg_tbl_addr_hi), base_addr + off);

	off = (unsigned long)&cq_mem[cq->cq_id].cur_q_entry_lo;
	writel(htonl(cq_cfg.cur_q_entry_lo), base_addr + off);

	off = (unsigned long)&cq_mem[cq->cq_id].cur_q_entry_hi;
	writel(htonl(cq_cfg.cur_q_entry_hi), base_addr + off);

	off = (unsigned long)&cq_mem[cq->cq_id].pg_cnt_n_prd_ptr;
	writel(cq_cfg.pg_cnt_n_prd_ptr, base_addr + off);

	off = (unsigned long)&cq_mem[cq->cq_id].entry_n_pg_size;
	writel(cq_cfg.entry_n_pg_size, base_addr + off);

	off = (unsigned long)&cq_mem[cq->cq_id].int_blk_n_cns_ptr;
	writel(cq_cfg.int_blk_n_cns_ptr, base_addr + off);

	off = (unsigned long)&cq_mem[cq->cq_id].q_state;
	writel(cq_cfg.q_state, base_addr + off);

	cq->ccb->producer_index = 0;
	*(cq->ccb->hw_producer_index) = 0;
}

void
bna_rit_create(struct bna_rx *rx)
{
	struct list_head *qe_rxp;
	struct bna_rxp *rxp;
	struct bna_rxq *q0 = NULL;
	struct bna_rxq *q1 = NULL;
	int offset;

	offset = 0;
	list_for_each(qe_rxp, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe_rxp;
		GET_RXQS(rxp, q0, q1);
		rx->rxf.rit_segment->rit[offset].large_rxq_id = q0->rxq_id;
		rx->rxf.rit_segment->rit[offset].small_rxq_id =
			(q1 ? q1->rxq_id : 0);
		offset++;
	}
}
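
/*
 * Each RIT (receive indirection table) entry pairs the large (data) RXQ
 * with the small (header) RXQ of one rx-path; single-queue paths leave
 * the small id at 0. Traffic steered to indirection index i thus lands
 * on that path's queues.
 */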

static int
_rx_can_satisfy(struct bna_rx_mod *rx_mod,
		struct bna_rx_config *rx_cfg)
{
	if ((rx_mod->rx_free_count == 0) ||
		(rx_mod->rxp_free_count == 0) ||
		(rx_mod->rxq_free_count == 0))
		return 0;

	if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
		if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
			(rx_mod->rxq_free_count < rx_cfg->num_paths))
			return 0;
	} else {
		if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
			(rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
			return 0;
	}

	if (!bna_rit_mod_can_satisfy(&rx_mod->bna->rit_mod, rx_cfg->num_paths))
		return 0;

	return 1;
}

static struct bna_rxq *
_get_free_rxq(struct bna_rx_mod *rx_mod)
{
	struct bna_rxq *rxq = NULL;
	struct list_head *qe = NULL;

	bfa_q_deq(&rx_mod->rxq_free_q, &qe);
	if (qe) {
		rx_mod->rxq_free_count--;
		rxq = (struct bna_rxq *)qe;
	}
	return rxq;
}

static void
_put_free_rxq(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
{
	bfa_q_qe_init(&rxq->qe);
	list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
	rx_mod->rxq_free_count++;
}

static struct bna_rxp *
_get_free_rxp(struct bna_rx_mod *rx_mod)
{
	struct list_head *qe = NULL;
	struct bna_rxp *rxp = NULL;

	bfa_q_deq(&rx_mod->rxp_free_q, &qe);
	if (qe) {
		rx_mod->rxp_free_count--;

		rxp = (struct bna_rxp *)qe;
	}

	return rxp;
}

static void
_put_free_rxp(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
{
	bfa_q_qe_init(&rxp->qe);
	list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
	rx_mod->rxp_free_count++;
}

static struct bna_rx *
_get_free_rx(struct bna_rx_mod *rx_mod)
{
	struct list_head *qe = NULL;
	struct bna_rx *rx = NULL;

	bfa_q_deq(&rx_mod->rx_free_q, &qe);
	if (qe) {
		rx_mod->rx_free_count--;

		rx = (struct bna_rx *)qe;
		bfa_q_qe_init(qe);
		list_add_tail(&rx->qe, &rx_mod->rx_active_q);
	}

	return rx;
}

static void
_put_free_rx(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
{
	bfa_q_qe_init(&rx->qe);
	list_add_tail(&rx->qe, &rx_mod->rx_free_q);
	rx_mod->rx_free_count++;
}

static void
_rx_init(struct bna_rx *rx, struct bna *bna)
{
	rx->bna = bna;
	rx->rx_flags = 0;

	INIT_LIST_HEAD(&rx->rxp_q);

	rx->rxq_stop_wc.wc_resume = bna_rx_cb_rxq_stopped_all;
	rx->rxq_stop_wc.wc_cbarg = rx;
	rx->rxq_stop_wc.wc_count = 0;

	rx->stop_cbfn = NULL;
	rx->stop_cbarg = NULL;
}

static void
_rxp_add_rxqs(struct bna_rxp *rxp,
		struct bna_rxq *q0,
		struct bna_rxq *q1)
{
	switch (rxp->type) {
	case BNA_RXP_SINGLE:
		rxp->rxq.single.only = q0;
		rxp->rxq.single.reserved = NULL;
		break;
	case BNA_RXP_SLR:
		rxp->rxq.slr.large = q0;
		rxp->rxq.slr.small = q1;
		break;
	case BNA_RXP_HDS:
		rxp->rxq.hds.data = q0;
		rxp->rxq.hds.hdr = q1;
		break;
	default:
		break;
	}
}

static void
_rxq_qpt_init(struct bna_rxq *rxq,
		struct bna_rxp *rxp,
		u32 page_count,
		u32 page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	int i;

	rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
	rxq->qpt.page_count = page_count;
	rxq->qpt.page_size = page_size;

	rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;

	for (i = 0; i < rxq->qpt.page_count; i++) {
		rxq->rcb->sw_qpt[i] = page_mem[i].kva;
		((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
			page_mem[i].dma.lsb;
		((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
			page_mem[i].dma.msb;
	}
}
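
/*
 * A QPT (queue page table) is kept in two parallel views: the DMA-visible
 * table (kv_qpt_ptr) holds the lsb/msb DMA address of every queue page
 * for the hardware, while sw_qpt[] holds the matching kernel virtual
 * addresses, so entry i of either view refers to the same page.
 */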

static void
_rxp_cqpt_setup(struct bna_rxp *rxp,
		u32 page_count,
		u32 page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	int i;

	rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
	rxp->cq.qpt.page_count = page_count;
	rxp->cq.qpt.page_size = page_size;

	rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;

	for (i = 0; i < rxp->cq.qpt.page_count; i++) {
		rxp->cq.ccb->sw_qpt[i] = page_mem[i].kva;

		((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
			page_mem[i].dma.lsb;
		((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
			page_mem[i].dma.msb;
	}
}

static void
_rx_add_rxp(struct bna_rx *rx, struct bna_rxp *rxp)
{
	list_add_tail(&rxp->qe, &rx->rxp_q);
}

static void
_init_rxmod_queues(struct bna_rx_mod *rx_mod)
{
	INIT_LIST_HEAD(&rx_mod->rx_free_q);
	INIT_LIST_HEAD(&rx_mod->rxq_free_q);
	INIT_LIST_HEAD(&rx_mod->rxp_free_q);
	INIT_LIST_HEAD(&rx_mod->rx_active_q);

	rx_mod->rx_free_count = 0;
	rx_mod->rxq_free_count = 0;
	rx_mod->rxp_free_count = 0;
}

static void
_rx_ctor(struct bna_rx *rx, int id)
{
	bfa_q_qe_init(&rx->qe);
	INIT_LIST_HEAD(&rx->rxp_q);
	rx->bna = NULL;

	rx->rxf.rxf_id = id;

	/* FIXME: mbox_qe ctor()?? */
	bfa_q_qe_init(&rx->mbox_qe.qe);

	rx->stop_cbfn = NULL;
	rx->stop_cbarg = NULL;
}

void
bna_rx_cb_multi_rxq_stopped(void *arg, int status)
{
	struct bna_rxp *rxp = (struct bna_rxp *)arg;

	bfa_wc_down(&rxp->rx->rxq_stop_wc);
}

void
bna_rx_cb_rxq_stopped_all(void *arg)
{
	struct bna_rx *rx = (struct bna_rx *)arg;

	bfa_fsm_send_event(rx, RX_E_RXQ_STOPPED);
}

static void
bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx,
			 enum bna_cb_status status)
{
	struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;

	bfa_wc_down(&rx_mod->rx_stop_wc);
}

static void
bna_rx_mod_cb_rx_stopped_all(void *arg)
{
	struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;

	if (rx_mod->stop_cbfn)
		rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
	rx_mod->stop_cbfn = NULL;
}

static void
bna_rx_start(struct bna_rx *rx)
{
	rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
	if (rx->rx_flags & BNA_RX_F_ENABLE)
		bfa_fsm_send_event(rx, RX_E_START);
}

static void
bna_rx_stop(struct bna_rx *rx)
{
	rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
	if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
		bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx, BNA_CB_SUCCESS);
	else {
		rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
		rx->stop_cbarg = &rx->bna->rx_mod;
		bfa_fsm_send_event(rx, RX_E_STOP);
	}
}

static void
bna_rx_fail(struct bna_rx *rx)
{
	/* Indicate port is not enabled, and failed */
	rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
	rx->rx_flags |= BNA_RX_F_PORT_FAILED;
	bfa_fsm_send_event(rx, RX_E_FAIL);
}

void
bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags |= BNA_RX_MOD_F_PORT_STARTED;
	if (type == BNA_RX_T_LOOPBACK)
		rx_mod->flags |= BNA_RX_MOD_F_PORT_LOOPBACK;

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		if (rx->type == type)
			bna_rx_start(rx);
	}
}

void
bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
	rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;

	rx_mod->stop_cbfn = bna_port_cb_rx_stopped;

	/**
	 * Before calling bna_rx_stop(), increment rx_stop_wc as many times
	 * as we are going to call bna_rx_stop
	 */
	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		if (rx->type == type)
			bfa_wc_up(&rx_mod->rx_stop_wc);
	}

	if (rx_mod->rx_stop_wc.wc_count == 0) {
		rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
		rx_mod->stop_cbfn = NULL;
		return;
	}

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		if (rx->type == type)
			bna_rx_stop(rx);
	}
}
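
/*
 * bfa_wc is a simple wait counter: bfa_wc_up() is called once per RX that
 * will be stopped, each stop completion calls bfa_wc_down(), and when the
 * count reaches zero the wc_resume handler (bna_rx_mod_cb_rx_stopped_all)
 * fires the deferred stop_cbfn. Counting everything up front avoids a
 * race where the first RX completes before the rest have been asked to
 * stop.
 */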

void
bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
{
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
	rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		bna_rx_fail(rx);
	}
}

void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
			struct bna_res_info *res_info)
{
	int index;
	struct bna_rx *rx_ptr;
	struct bna_rxp *rxp_ptr;
	struct bna_rxq *rxq_ptr;

	rx_mod->bna = bna;
	rx_mod->flags = 0;

	rx_mod->rx = (struct bna_rx *)
		res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
	rx_mod->rxp = (struct bna_rxp *)
		res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
	rx_mod->rxq = (struct bna_rxq *)
		res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;

	/* Initialize the queues */
	_init_rxmod_queues(rx_mod);

	/* Build RX queues */
	for (index = 0; index < BFI_MAX_RXQ; index++) {
		rx_ptr = &rx_mod->rx[index];
		_rx_ctor(rx_ptr, index);
		list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
		rx_mod->rx_free_count++;
	}

	/* build RX-path queue */
	for (index = 0; index < BFI_MAX_RXQ; index++) {
		rxp_ptr = &rx_mod->rxp[index];
		rxp_ptr->cq.cq_id = index;
		bfa_q_qe_init(&rxp_ptr->qe);
		list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
		rx_mod->rxp_free_count++;
	}

	/* build RXQ queue */
	for (index = 0; index < BFI_MAX_RXQ; index++) {
		rxq_ptr = &rx_mod->rxq[index];
		rxq_ptr->rxq_id = index;

		bfa_q_qe_init(&rxq_ptr->qe);
		list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
		rx_mod->rxq_free_count++;
	}

	rx_mod->rx_stop_wc.wc_resume = bna_rx_mod_cb_rx_stopped_all;
	rx_mod->rx_stop_wc.wc_cbarg = rx_mod;
	rx_mod->rx_stop_wc.wc_count = 0;
}

void
bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
{
	struct list_head *qe;
	int i;

	/*
	 * Walk the free queues; the counts are computed only as a sanity
	 * aid (e.g. under a debugger) and are otherwise discarded.
	 */
	i = 0;
	list_for_each(qe, &rx_mod->rx_free_q)
		i++;

	i = 0;
	list_for_each(qe, &rx_mod->rxp_free_q)
		i++;

	i = 0;
	list_for_each(qe, &rx_mod->rxq_free_q)
		i++;

	rx_mod->bna = NULL;
}

int
bna_rx_state_get(struct bna_rx *rx)
{
	return bfa_sm_to_state(rx_sm_table, rx->fsm);
}

void
bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
{
	u32 cq_size, hq_size, dq_size;
	u32 cpage_count, hpage_count, dpage_count;
	struct bna_mem_info *mem_info;
	u32 cq_depth;
	u32 hq_depth;
	u32 dq_depth;

	dq_depth = q_cfg->q_depth;
	hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
	cq_depth = dq_depth + hq_depth;

	BNA_TO_POWER_OF_2_HIGH(cq_depth);
	cq_size = cq_depth * BFI_CQ_WI_SIZE;
	cq_size = ALIGN(cq_size, PAGE_SIZE);
	cpage_count = SIZE_TO_PAGES(cq_size);

	BNA_TO_POWER_OF_2_HIGH(dq_depth);
	dq_size = dq_depth * BFI_RXQ_WI_SIZE;
	dq_size = ALIGN(dq_size, PAGE_SIZE);
	dpage_count = SIZE_TO_PAGES(dq_size);

	if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
		BNA_TO_POWER_OF_2_HIGH(hq_depth);
		hq_size = hq_depth * BFI_RXQ_WI_SIZE;
		hq_size = ALIGN(hq_size, PAGE_SIZE);
		hpage_count = SIZE_TO_PAGES(hq_size);
	} else {
		hpage_count = 0;
	}

	/* CCB structures */
	res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_ccb);
	mem_info->num = q_cfg->num_paths;

	/* RCB structures */
	res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_rcb);
	mem_info->num = BNA_GET_RXQS(q_cfg);

	/* Completion QPT */
	res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = q_cfg->num_paths;

	/* Completion s/w QPT */
	res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = cpage_count * sizeof(void *);
	mem_info->num = q_cfg->num_paths;

	/* Completion QPT pages */
	res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE;
	mem_info->num = cpage_count * q_cfg->num_paths;

	/* Data QPTs */
	res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = q_cfg->num_paths;

	/* Data s/w QPTs */
	res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = dpage_count * sizeof(void *);
	mem_info->num = q_cfg->num_paths;

	/* Data QPT pages */
	res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE;
	mem_info->num = dpage_count * q_cfg->num_paths;

	/* Hdr QPTs */
	res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

	/* Hdr s/w QPTs */
	res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = hpage_count * sizeof(void *);
	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

	/* Hdr QPT pages */
	res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = (hpage_count ? PAGE_SIZE : 0);
	mem_info->num = (hpage_count ? (hpage_count * q_cfg->num_paths) : 0);

	/* RX Interrupts */
	res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
	res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
	res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
}
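
/*
 * Worked sizing example (illustrative only; assumes 4 KiB pages and a
 * 16-byte CQ work item): a SINGLE-type path with q_depth 1000 gives
 * dq_depth 1000 and hq_depth 0, so cq_depth rounds up to 1024,
 * cq_size = 16 KiB and cpage_count = 4; hpage_count stays 0, so no
 * header-queue memory is requested at all.
 */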

struct bna_rx *
bna_rx_create(struct bna *bna, struct bnad *bnad,
		struct bna_rx_config *rx_cfg,
		struct bna_rx_event_cbfn *rx_cbfn,
		struct bna_res_info *res_info,
		void *priv)
{
	struct bna_rx_mod *rx_mod = &bna->rx_mod;
	struct bna_rx *rx;
	struct bna_rxp *rxp;
	struct bna_rxq *q0;
	struct bna_rxq *q1;
	struct bna_intr_info *intr_info;
	u32 page_count;
	struct bna_mem_descr *ccb_mem;
	struct bna_mem_descr *rcb_mem;
	struct bna_mem_descr *unmapq_mem;
	struct bna_mem_descr *cqpt_mem;
	struct bna_mem_descr *cswqpt_mem;
	struct bna_mem_descr *cpage_mem;
	struct bna_mem_descr *hqpt_mem;		/* Header/Small Q qpt */
	struct bna_mem_descr *dqpt_mem;		/* Data/Large Q qpt */
	struct bna_mem_descr *hsqpt_mem;	/* s/w qpt for hdr */
	struct bna_mem_descr *dsqpt_mem;	/* s/w qpt for data */
	struct bna_mem_descr *hpage_mem;	/* hdr page mem */
	struct bna_mem_descr *dpage_mem;	/* data page mem */
	int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0;
	int dpage_count, hpage_count, rcb_idx;
	struct bna_ib_config ibcfg;

	/* Fail if we don't have enough RXPs, RXQs */
	if (!_rx_can_satisfy(rx_mod, rx_cfg))
		return NULL;

	/* Initialize resource pointers */
	intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
	ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
	rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
	unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
	cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
	cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
	cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
	hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
	dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
	hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
	dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
	hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
	dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];

	/* Compute q depth & page count */
	page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.num /
			rx_cfg->num_paths;

	dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.num /
			rx_cfg->num_paths;

	hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.num /
			rx_cfg->num_paths;

	/* Get RX pointer */
	rx = _get_free_rx(rx_mod);
	_rx_init(rx, bna);
	rx->priv = priv;
	rx->type = rx_cfg->rx_type;

	rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
	rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
	rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
	rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
	/* Following callbacks are mandatory */
	rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
	rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;

	if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_STARTED) {
		switch (rx->type) {
		case BNA_RX_T_REGULAR:
			if (!(rx->bna->rx_mod.flags &
				BNA_RX_MOD_F_PORT_LOOPBACK))
				rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
			break;
		case BNA_RX_T_LOOPBACK:
			if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_LOOPBACK)
				rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
			break;
		}
	}

	for (i = 0, rcb_idx = 0; i < rx_cfg->num_paths; i++) {
		rxp = _get_free_rxp(rx_mod);
		rxp->type = rx_cfg->rxp_type;
		rxp->rx = rx;
		rxp->cq.rx = rx;

		/* Get required RXQs, and queue them to rx-path */
		q0 = _get_free_rxq(rx_mod);
		if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
			q1 = NULL;
		else
			q1 = _get_free_rxq(rx_mod);

		/* Initialize IB */
		if (1 == intr_info->num) {
			rxp->cq.ib = bna_ib_get(&bna->ib_mod,
					intr_info->intr_type,
					intr_info->idl[0].vector);
			rxp->vector = intr_info->idl[0].vector;
		} else {
			rxp->cq.ib = bna_ib_get(&bna->ib_mod,
					intr_info->intr_type,
					intr_info->idl[i].vector);

			/* Map the MSI-X vector used for this RXP */
			rxp->vector = intr_info->idl[i].vector;
		}

		rxp->cq.ib_seg_offset = bna_ib_reserve_idx(rxp->cq.ib);

		ibcfg.coalescing_timeo = BFI_RX_COALESCING_TIMEO;
		ibcfg.interpkt_count = BFI_RX_INTERPKT_COUNT;
		ibcfg.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
		ibcfg.ctrl_flags = BFI_IB_CF_INT_ENABLE;

		bna_ib_config(rxp->cq.ib, &ibcfg);

		/* Link rxqs to rxp */
		_rxp_add_rxqs(rxp, q0, q1);

		/* Link rxp to rx */
		_rx_add_rxp(rx, rxp);

		q0->rx = rx;
		q0->rxp = rxp;

		/* Initialize RCB for the large / data q */
		q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
		RXQ_RCB_INIT(q0, rxp, rx_cfg->q_depth, bna, 0,
			(void *)unmapq_mem[rcb_idx].kva);
		rcb_idx++;
		(q0)->rx_packets = (q0)->rx_bytes = 0;
		(q0)->rx_packets_with_error = (q0)->rxbuf_alloc_failed = 0;

		/* Initialize RXQs */
		_rxq_qpt_init(q0, rxp, dpage_count, PAGE_SIZE,
			&dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[dpage_idx]);
		q0->rcb->page_idx = dpage_idx;
		q0->rcb->page_count = dpage_count;
		dpage_idx += dpage_count;

		/* Call bnad to complete rcb setup */
		if (rx->rcb_setup_cbfn)
			rx->rcb_setup_cbfn(bnad, q0->rcb);

		if (q1) {
			q1->rx = rx;
			q1->rxp = rxp;

			q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
			RXQ_RCB_INIT(q1, rxp, rx_cfg->q_depth, bna, 1,
				(void *)unmapq_mem[rcb_idx].kva);
			rcb_idx++;
			(q1)->buffer_size = (rx_cfg)->small_buff_size;
			(q1)->rx_packets = (q1)->rx_bytes = 0;
			(q1)->rx_packets_with_error =
				(q1)->rxbuf_alloc_failed = 0;

			_rxq_qpt_init(q1, rxp, hpage_count, PAGE_SIZE,
				&hqpt_mem[i], &hsqpt_mem[i],
				&hpage_mem[hpage_idx]);
			q1->rcb->page_idx = hpage_idx;
			q1->rcb->page_count = hpage_count;
			hpage_idx += hpage_count;

			/* Call bnad to complete rcb setup */
			if (rx->rcb_setup_cbfn)
				rx->rcb_setup_cbfn(bnad, q1->rcb);
		}

		/* Setup RXP::CQ */
		rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
		_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
			&cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[cpage_idx]);
		rxp->cq.ccb->page_idx = cpage_idx;
		rxp->cq.ccb->page_count = page_count;
		cpage_idx += page_count;

		rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
		rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;

		rxp->cq.ccb->producer_index = 0;
		rxp->cq.ccb->q_depth = rx_cfg->q_depth +
			((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
			0 : rx_cfg->q_depth);
		rxp->cq.ccb->i_dbell = &rxp->cq.ib->door_bell;
		rxp->cq.ccb->rcb[0] = q0->rcb;
		if (q1)
			rxp->cq.ccb->rcb[1] = q1->rcb;
		rxp->cq.ccb->cq = &rxp->cq;
		rxp->cq.ccb->bnad = bna->bnad;
		rxp->cq.ccb->hw_producer_index =
			((volatile u32 *)rxp->cq.ib->ib_seg_host_addr_kva +
			(rxp->cq.ib_seg_offset * BFI_IBIDX_SIZE));
		*(rxp->cq.ccb->hw_producer_index) = 0;
		rxp->cq.ccb->intr_type = intr_info->intr_type;
		rxp->cq.ccb->intr_vector = (intr_info->num == 1) ?
			intr_info->idl[0].vector :
			intr_info->idl[i].vector;
		rxp->cq.ccb->rx_coalescing_timeo =
			rxp->cq.ib->ib_config.coalescing_timeo;
		rxp->cq.ccb->id = i;

		/* Call bnad to complete CCB setup */
		if (rx->ccb_setup_cbfn)
			rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);

	} /* for each rx-path */

	bna_rxf_init(&rx->rxf, rx, rx_cfg);

	bfa_fsm_set_state(rx, bna_rx_sm_stopped);

	return rx;
}
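
/*
 * Typical usage, sketched (error handling omitted): size resources with
 * bna_rx_res_req(), have bnad allocate them, then
 *
 *	rx = bna_rx_create(bna, bnad, &rx_cfg, &rx_cbfn, res_info, priv);
 *	bna_rx_enable(rx);
 *
 * rx_cleanup_cbfn and rx_post_cbfn must be set in rx_cbfn; the other
 * four callbacks are optional, as the NULL checks above show.
 */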

void
bna_rx_destroy(struct bna_rx *rx)
{
	struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
	struct bna_ib_mod *ib_mod = &rx->bna->ib_mod;
	struct bna_rxq *q0 = NULL;
	struct bna_rxq *q1 = NULL;
	struct bna_rxp *rxp;
	struct list_head *qe;

	bna_rxf_uninit(&rx->rxf);

	while (!list_empty(&rx->rxp_q)) {
		bfa_q_deq(&rx->rxp_q, &rxp);
		GET_RXQS(rxp, q0, q1);
		/* Callback to bnad for destroying RCB */
		if (rx->rcb_destroy_cbfn)
			rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
		q0->rcb = NULL;
		q0->rxp = NULL;
		q0->rx = NULL;
		_put_free_rxq(rx_mod, q0);
		if (q1) {
			/* Callback to bnad for destroying RCB */
			if (rx->rcb_destroy_cbfn)
				rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
			q1->rcb = NULL;
			q1->rxp = NULL;
			q1->rx = NULL;
			_put_free_rxq(rx_mod, q1);
		}
		rxp->rxq.slr.large = NULL;
		rxp->rxq.slr.small = NULL;
		if (rxp->cq.ib) {
			if (rxp->cq.ib_seg_offset != 0xff)
				bna_ib_release_idx(rxp->cq.ib,
						rxp->cq.ib_seg_offset);
			bna_ib_put(ib_mod, rxp->cq.ib);
			rxp->cq.ib = NULL;
		}
		/* Callback to bnad for destroying CCB */
		if (rx->ccb_destroy_cbfn)
			rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
		rxp->cq.ccb = NULL;
		rxp->rx = NULL;
		_put_free_rxp(rx_mod, rxp);
	}

	list_for_each(qe, &rx_mod->rx_active_q) {
		if (qe == &rx->qe) {
			list_del(&rx->qe);
			bfa_q_qe_init(&rx->qe);
			break;
		}
	}

	rx->bna = NULL;
	rx->priv = NULL;
	_put_free_rx(rx_mod, rx);
}

void
bna_rx_enable(struct bna_rx *rx)
{
	if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
		return;

	rx->rx_flags |= BNA_RX_F_ENABLE;
	if (rx->rx_flags & BNA_RX_F_PORT_ENABLED)
		bfa_fsm_send_event(rx, RX_E_START);
}

void
bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
		void (*cbfn)(void *, struct bna_rx *,
				enum bna_cb_status))
{
	if (type == BNA_SOFT_CLEANUP) {
		/* h/w should not be accessed. Treat it as already stopped */
		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
	} else {
		rx->stop_cbfn = cbfn;
		rx->stop_cbarg = rx->bna->bnad;

		rx->rx_flags &= ~BNA_RX_F_ENABLE;

		bfa_fsm_send_event(rx, RX_E_STOP);
	}
}

/**
 * TX
 */
#define call_tx_stop_cbfn(tx, status)\
do {\
	if ((tx)->stop_cbfn)\
		(tx)->stop_cbfn((tx)->stop_cbarg, (tx), status);\
	(tx)->stop_cbfn = NULL;\
	(tx)->stop_cbarg = NULL;\
} while (0)

#define call_tx_prio_change_cbfn(tx, status)\
do {\
	if ((tx)->prio_change_cbfn)\
		(tx)->prio_change_cbfn((tx)->bna->bnad, (tx), status);\
	(tx)->prio_change_cbfn = NULL;\
} while (0)

static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx,
					enum bna_cb_status status);
static void bna_tx_cb_txq_stopped(void *arg, int status);
static void bna_tx_cb_stats_cleared(void *arg, int status);
static void __bna_tx_stop(struct bna_tx *tx);
static void __bna_tx_start(struct bna_tx *tx);
static void __bna_txf_stat_clr(struct bna_tx *tx);

enum bna_tx_event {
	TX_E_START = 1,
	TX_E_STOP = 2,
	TX_E_FAIL = 3,
	TX_E_TXQ_STOPPED = 4,
	TX_E_PRIO_CHANGE = 5,
	TX_E_STAT_CLEARED = 6,
};

enum bna_tx_state {
	BNA_TX_STOPPED = 1,
	BNA_TX_STARTED = 2,
	BNA_TX_TXQ_STOP_WAIT = 3,
	BNA_TX_PRIO_STOP_WAIT = 4,
	BNA_TX_STAT_CLR_WAIT = 5,
};

bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, started, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, txq_stop_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, stat_clr_wait, struct bna_tx,
			enum bna_tx_event);

static struct bfa_sm_table tx_sm_table[] = {
	{BFA_SM(bna_tx_sm_stopped), BNA_TX_STOPPED},
	{BFA_SM(bna_tx_sm_started), BNA_TX_STARTED},
	{BFA_SM(bna_tx_sm_txq_stop_wait), BNA_TX_TXQ_STOP_WAIT},
	{BFA_SM(bna_tx_sm_prio_stop_wait), BNA_TX_PRIO_STOP_WAIT},
	{BFA_SM(bna_tx_sm_stat_clr_wait), BNA_TX_STAT_CLR_WAIT},
};

static void
bna_tx_sm_stopped_entry(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		(tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
	}

	call_tx_stop_cbfn(tx, BNA_CB_SUCCESS);
}

static void
bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_START:
		bfa_fsm_set_state(tx, bna_tx_sm_started);
		break;

	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	case TX_E_FAIL:
		/* No-op */
		break;

	case TX_E_PRIO_CHANGE:
		call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
		break;

	case TX_E_TXQ_STOPPED:
		/**
		 * This event is received due to flushing of mbox when
		 * device fails
		 */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_started_entry(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;

	__bna_tx_start(tx);

	/* Start IB */
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_ack(&txq->ib->door_bell, 0);
	}
}

static void
bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
{
	struct bna_txq *txq;
	struct list_head *qe;

	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
		__bna_tx_stop(tx);
		break;

	case TX_E_FAIL:
		list_for_each(qe, &tx->txq_q) {
			txq = (struct bna_txq *)qe;
			bna_ib_fail(txq->ib);
			(tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb);
		}
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	case TX_E_PRIO_CHANGE:
		bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_txq_stop_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_txq_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	struct bna_txq *txq;
	struct list_head *qe;

	switch (event) {
	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	case TX_E_TXQ_STOPPED:
		list_for_each(qe, &tx->txq_q) {
			txq = (struct bna_txq *)qe;
			bna_ib_stop(txq->ib);
		}
		bfa_fsm_set_state(tx, bna_tx_sm_stat_clr_wait);
		break;

	case TX_E_PRIO_CHANGE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
{
	__bna_tx_stop(tx);
}

static void
bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	struct bna_txq *txq;
	struct list_head *qe;

	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
		break;

	case TX_E_FAIL:
		call_tx_prio_change_cbfn(tx, BNA_CB_FAIL);
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	case TX_E_TXQ_STOPPED:
		list_for_each(qe, &tx->txq_q) {
			txq = (struct bna_txq *)qe;
			bna_ib_stop(txq->ib);
			(tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
		}
		call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
		bfa_fsm_set_state(tx, bna_tx_sm_started);
		break;

	case TX_E_PRIO_CHANGE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_stat_clr_wait_entry(struct bna_tx *tx)
{
	__bna_txf_stat_clr(tx);
}

static void
bna_tx_sm_stat_clr_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_FAIL:
	case TX_E_STAT_CLEARED:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

3362 | static void | |
3363 | __bna_txq_start(struct bna_tx *tx, struct bna_txq *txq) | |
3364 | { | |
3365 | struct bna_rxtx_q_mem *q_mem; | |
3366 | struct bna_txq_mem txq_cfg; | |
3367 | struct bna_txq_mem *txq_mem; | |
3368 | struct bna_dma_addr cur_q_addr; | |
3369 | u32 pg_num; | |
3370 | void __iomem *base_addr; | |
3371 | unsigned long off; | |
3372 | ||
3373 | /* Fill out structure, to be subsequently written to hardware */ | |
3374 | txq_cfg.pg_tbl_addr_lo = txq->qpt.hw_qpt_ptr.lsb; | |
3375 | txq_cfg.pg_tbl_addr_hi = txq->qpt.hw_qpt_ptr.msb; | |
3376 | cur_q_addr = *((struct bna_dma_addr *)(txq->qpt.kv_qpt_ptr)); | |
3377 | txq_cfg.cur_q_entry_lo = cur_q_addr.lsb; | |
3378 | txq_cfg.cur_q_entry_hi = cur_q_addr.msb; | |
3379 | ||
3380 | txq_cfg.pg_cnt_n_prd_ptr = (txq->qpt.page_count << 16) | 0x0; | |
3381 | ||
3382 | txq_cfg.entry_n_pg_size = ((u32)(BFI_TXQ_WI_SIZE >> 2) << 16) | | |
3383 | (txq->qpt.page_size >> 2); | |
3384 | txq_cfg.int_blk_n_cns_ptr = ((((u32)txq->ib_seg_offset) << 24) | | |
3385 | ((u32)(txq->ib->ib_id & 0xff) << 16) | 0x0); | |
3386 | ||
3387 | txq_cfg.cns_ptr2_n_q_state = BNA_Q_IDLE_STATE; | |
3388 | txq_cfg.nxt_qid_n_fid_n_pri = (((tx->txf.txf_id & 0x3f) << 3) | | |
0613ecfc | 3389 | (txq->priority & 0x7)); |
8b230ed8 RM |
3390 | txq_cfg.wvc_n_cquota_n_rquota = |
3391 | ((((u32)BFI_TX_MAX_WRR_QUOTA & 0xfff) << 12) | | |
3392 | (BFI_TX_MAX_WRR_QUOTA & 0xfff)); | |
3393 | ||
3394 | /* Set up the page register, then write the config to H/W */ | |
3395 | ||
3396 | pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + tx->bna->port_num, | |
3397 | HQM_RXTX_Q_RAM_BASE_OFFSET); | |
3398 | writel(pg_num, tx->bna->regs.page_addr); | |
3399 | ||
3400 | base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva, | |
3401 | HQM_RXTX_Q_RAM_BASE_OFFSET); | |
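/*
 * The null-pointer arithmetic below is the classic offsetof idiom:
 * with q_mem based at address 0, &q_mem[txq_id].txq.<member> is
 * numerically the byte offset of that member inside the mapped
 * register window, i.e. equivalent to
 *
 *   txq->txq_id * sizeof(struct bna_rxtx_q_mem) +
 *   offsetof(struct bna_rxtx_q_mem, txq) +
 *   offsetof(struct bna_txq_mem, <member>);
 *
 * The same trick is used for tx_fndb in __bna_txf_start()/_stop()
 * and for the doorbell qset in bna_tx_create().
 */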
3402 | q_mem = (struct bna_rxtx_q_mem *)0; | |
3403 | txq_mem = &q_mem[txq->txq_id].txq; | |
3404 | ||
3405 | /* | |
3406 | * The following four writes are a workaround: the H/W needs to | |
3407 | * read these DMA addresses as little endian. | |
3408 | */ | |
3409 | ||
3410 | off = (unsigned long)&txq_mem->pg_tbl_addr_lo; | |
3411 | writel(htonl(txq_cfg.pg_tbl_addr_lo), base_addr + off); | |
3412 | ||
3413 | off = (unsigned long)&txq_mem->pg_tbl_addr_hi; | |
3414 | writel(htonl(txq_cfg.pg_tbl_addr_hi), base_addr + off); | |
3415 | ||
3416 | off = (unsigned long)&txq_mem->cur_q_entry_lo; | |
3417 | writel(htonl(txq_cfg.cur_q_entry_lo), base_addr + off); | |
3418 | ||
3419 | off = (unsigned long)&txq_mem->cur_q_entry_hi; | |
3420 | writel(htonl(txq_cfg.cur_q_entry_hi), base_addr + off); | |
3421 | ||
3422 | off = (unsigned long)&txq_mem->pg_cnt_n_prd_ptr; | |
3423 | writel(txq_cfg.pg_cnt_n_prd_ptr, base_addr + off); | |
3424 | ||
3425 | off = (unsigned long)&txq_mem->entry_n_pg_size; | |
3426 | writel(txq_cfg.entry_n_pg_size, base_addr + off); | |
3427 | ||
3428 | off = (unsigned long)&txq_mem->int_blk_n_cns_ptr; | |
3429 | writel(txq_cfg.int_blk_n_cns_ptr, base_addr + off); | |
3430 | ||
3431 | off = (unsigned long)&txq_mem->cns_ptr2_n_q_state; | |
3432 | writel(txq_cfg.cns_ptr2_n_q_state, base_addr + off); | |
3433 | ||
3434 | off = (unsigned long)&txq_mem->nxt_qid_n_fid_n_pri; | |
3435 | writel(txq_cfg.nxt_qid_n_fid_n_pri, base_addr + off); | |
3436 | ||
3437 | off = (unsigned long)&txq_mem->wvc_n_cquota_n_rquota; | |
3438 | writel(txq_cfg.wvc_n_cquota_n_rquota, base_addr + off); | |
3439 | ||
3440 | txq->tcb->producer_index = 0; | |
3441 | txq->tcb->consumer_index = 0; | |
3442 | *(txq->tcb->hw_consumer_index) = 0; | |
3443 | ||
3444 | } | |
3445 | ||
3446 | static void | |
3447 | __bna_txq_stop(struct bna_tx *tx, struct bna_txq *txq) | |
3448 | { | |
3449 | struct bfi_ll_q_stop_req ll_req; | |
3450 | u32 bit_mask[2] = {0, 0}; | |
3451 | if (txq->txq_id < 32) | |
3452 | bit_mask[0] = (u32)1 << txq->txq_id; | |
3453 | else | |
3454 | bit_mask[1] = (u32)1 << (txq->txq_id - 32); | |
3455 | ||
3456 | memset(&ll_req, 0, sizeof(ll_req)); | |
3457 | ll_req.mh.msg_class = BFI_MC_LL; | |
3458 | ll_req.mh.msg_id = BFI_LL_H2I_TXQ_STOP_REQ; | |
3459 | ll_req.mh.mtag.h2i.lpu_id = 0; | |
3460 | ll_req.q_id_mask[0] = htonl(bit_mask[0]); | |
3461 | ll_req.q_id_mask[1] = htonl(bit_mask[1]); | |
3462 | ||
3463 | bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req), | |
3464 | bna_tx_cb_txq_stopped, tx); | |
3465 | ||
3466 | bna_mbox_send(tx->bna, &tx->mbox_qe); | |
3467 | } | |
3468 | ||
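/*
 * The open-coded id-to-two-word mask split above reappears in the
 * txf_bmap bookkeeping of __bna_txf_start()/_stop() and again in
 * __bna_txf_stat_clr().  It amounts to this hypothetical helper
 * (illustration only, not part of the driver):
 *
 *   static inline void bna_id_to_mask(int id, u32 mask[2])
 *   {
 *           mask[0] = mask[1] = 0;
 *           mask[id >> 5] = 1U << (id & 31);
 *   }
 *
 * Ids 0..31 select a bit in word 0, ids 32..63 a bit in word 1; the
 * mailbox paths convert each word with htonl() before sending.
 */
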
3469 | static void | |
3470 | __bna_txf_start(struct bna_tx *tx) | |
3471 | { | |
3472 | struct bna_tx_fndb_ram *tx_fndb; | |
3473 | struct bna_txf *txf = &tx->txf; | |
3474 | void __iomem *base_addr; | |
3475 | unsigned long off; | |
3476 | ||
3477 | writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM + | |
3478 | (tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET), | |
3479 | tx->bna->regs.page_addr); | |
3480 | ||
3481 | base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva, | |
3482 | TX_FNDB_RAM_BASE_OFFSET); | |
3483 | ||
3484 | tx_fndb = (struct bna_tx_fndb_ram *)0; | |
3485 | off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags; | |
3486 | ||
3487 | writel(((u32)txf->vlan << 16) | txf->ctrl_flags, | |
3488 | base_addr + off); | |
3489 | ||
3490 | if (tx->txf.txf_id < 32) | |
3491 | tx->bna->tx_mod.txf_bmap[0] |= ((u32)1 << tx->txf.txf_id); | |
3492 | else | |
3493 | tx->bna->tx_mod.txf_bmap[1] |= ((u32) | |
3494 | 1 << (tx->txf.txf_id - 32)); | |
3495 | } | |
3496 | ||
3497 | static void | |
3498 | __bna_txf_stop(struct bna_tx *tx) | |
3499 | { | |
3500 | struct bna_tx_fndb_ram *tx_fndb; | |
3501 | u32 page_num; | |
3502 | u32 ctl_flags; | |
3503 | struct bna_txf *txf = &tx->txf; | |
3504 | void __iomem *base_addr; | |
3505 | unsigned long off; | |
3506 | ||
3507 | /* retrieve the running control flags and turn off the enable bit */ | |
3508 | page_num = BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM + | |
3509 | (tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET); | |
3510 | writel(page_num, tx->bna->regs.page_addr); | |
3511 | ||
3512 | base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva, | |
3513 | TX_FNDB_RAM_BASE_OFFSET); | |
3514 | tx_fndb = (struct bna_tx_fndb_ram *)0; | |
3515 | off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags; | |
3516 | ||
3517 | ctl_flags = readl(base_addr + off); | |
3518 | ctl_flags &= ~BFI_TXF_CF_ENABLE; | |
3519 | ||
3520 | writel(ctl_flags, base_addr + off); | |
3521 | ||
3522 | if (tx->txf.txf_id < 32) | |
3523 | tx->bna->tx_mod.txf_bmap[0] &= ~((u32)1 << tx->txf.txf_id); | |
3524 | else | |
3525 | tx->bna->tx_mod.txf_bmap[1] &= ~((u32) | |
3526 | 1 << (tx->txf.txf_id - 32)); | |
3527 | } | |
3528 | ||
3529 | static void | |
3530 | __bna_txf_stat_clr(struct bna_tx *tx) | |
3531 | { | |
3532 | struct bfi_ll_stats_req ll_req; | |
3533 | u32 txf_bmap[2] = {0, 0}; | |
3534 | if (tx->txf.txf_id < 32) | |
3535 | txf_bmap[0] = ((u32)1 << tx->txf.txf_id); | |
3536 | else | |
3537 | txf_bmap[1] = ((u32)1 << (tx->txf.txf_id - 32)); | |
3538 | bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0); | |
3539 | ll_req.stats_mask = 0; | |
3540 | ll_req.rxf_id_mask[0] = 0; | |
3541 | ll_req.rxf_id_mask[1] = 0; | |
3542 | ll_req.txf_id_mask[0] = htonl(txf_bmap[0]); | |
3543 | ll_req.txf_id_mask[1] = htonl(txf_bmap[1]); | |
3544 | ||
3545 | bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req), | |
3546 | bna_tx_cb_stats_cleared, tx); | |
3547 | bna_mbox_send(tx->bna, &tx->mbox_qe); | |
3548 | } | |
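
/*
 * Both mailbox requests posted above (__bna_txq_stop() and the
 * stats-clear request) complete asynchronously: the firmware
 * response runs bna_tx_cb_txq_stopped() or bna_tx_cb_stats_cleared(),
 * which feed TX_E_TXQ_STOPPED / TX_E_STAT_CLEARED back into the
 * state machine.
 */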
3549 | ||
3550 | static void | |
3551 | __bna_tx_start(struct bna_tx *tx) | |
3552 | { | |
3553 | struct bna_txq *txq; | |
3554 | struct list_head *qe; | |
3555 | ||
3556 | list_for_each(qe, &tx->txq_q) { | |
3557 | txq = (struct bna_txq *)qe; | |
3558 | bna_ib_start(txq->ib); | |
3559 | __bna_txq_start(tx, txq); | |
3560 | } | |
3561 | ||
3562 | __bna_txf_start(tx); | |
3563 | ||
3564 | list_for_each(qe, &tx->txq_q) { | |
3565 | txq = (struct bna_txq *)qe; | |
3566 | txq->tcb->priority = txq->priority; | |
3567 | (tx->tx_resume_cbfn)(tx->bna->bnad, txq->tcb); | |
3568 | } | |
3569 | } | |
3570 | ||
3571 | static void | |
3572 | __bna_tx_stop(struct bna_tx *tx) | |
3573 | { | |
3574 | struct bna_txq *txq; | |
3575 | struct list_head *qe; | |
3576 | ||
3577 | list_for_each(qe, &tx->txq_q) { | |
3578 | txq = (struct bna_txq *)qe; | |
3579 | (tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb); | |
3580 | } | |
3581 | ||
3582 | __bna_txf_stop(tx); | |
3583 | ||
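/*
 * Take one wait-counter reference per TxQ before posting any stop
 * request, so that bna_tx_cb_txq_stopped_all() cannot fire until
 * every __bna_txq_stop() below has been issued and acknowledged.
 */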
3584 | list_for_each(qe, &tx->txq_q) { | |
3585 | txq = (struct bna_txq *)qe; | |
3586 | bfa_wc_up(&tx->txq_stop_wc); | |
3587 | } | |
3588 | ||
3589 | list_for_each(qe, &tx->txq_q) { | |
3590 | txq = (struct bna_txq *)qe; | |
3591 | __bna_txq_stop(tx, txq); | |
3592 | } | |
3593 | } | |
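
/*
 * For reference, the bfa_wc wait counter used above behaves roughly
 * like this (sketch of the bfa_cs.h helpers, not verbatim):
 *
 *   bfa_wc_up(wc):   wc->wc_count++;
 *   bfa_wc_down(wc): if (--wc->wc_count == 0)
 *                            wc->wc_resume(wc->wc_cbarg);
 *
 * For txq_stop_wc, wc_resume is bna_tx_cb_txq_stopped_all(), wired
 * up in bna_tx_create().
 */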
3594 | ||
3595 | static void | |
3596 | bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size, | |
3597 | struct bna_mem_descr *qpt_mem, | |
3598 | struct bna_mem_descr *swqpt_mem, | |
3599 | struct bna_mem_descr *page_mem) | |
3600 | { | |
3601 | int i; | |
3602 | ||
3603 | txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; | |
3604 | txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb; | |
3605 | txq->qpt.kv_qpt_ptr = qpt_mem->kva; | |
3606 | txq->qpt.page_count = page_count; | |
3607 | txq->qpt.page_size = page_size; | |
3608 | ||
3609 | txq->tcb->sw_qpt = (void **) swqpt_mem->kva; | |
3610 | ||
3611 | for (i = 0; i < page_count; i++) { | |
3612 | txq->tcb->sw_qpt[i] = page_mem[i].kva; | |
3613 | ||
3614 | ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb = | |
3615 | page_mem[i].dma.lsb; | |
3616 | ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb = | |
3617 | page_mem[i].dma.msb; | |
3618 | ||
3619 | } | |
3620 | } | |
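
/*
 * Resulting queue page table (QPT) layout, as built above: the
 * DMA-visible table at kv_qpt_ptr holds the bus address of every
 * queue page (this is what the adapter walks), while tcb->sw_qpt is
 * a parallel array of the pages' kernel virtual addresses for the
 * driver's own use.  Both are page_count entries long.
 */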
3621 | ||
3622 | static void | |
3623 | bna_tx_free(struct bna_tx *tx) | |
3624 | { | |
3625 | struct bna_tx_mod *tx_mod = &tx->bna->tx_mod; | |
3626 | struct bna_txq *txq; | |
3627 | struct bna_ib_mod *ib_mod = &tx->bna->ib_mod; | |
3628 | struct list_head *qe; | |
3629 | ||
3630 | while (!list_empty(&tx->txq_q)) { | |
3631 | bfa_q_deq(&tx->txq_q, &txq); | |
3632 | bfa_q_qe_init(&txq->qe); | |
3633 | if (txq->ib) { | |
3634 | if (txq->ib_seg_offset != -1) | |
3635 | bna_ib_release_idx(txq->ib, | |
3636 | txq->ib_seg_offset); | |
3637 | bna_ib_put(ib_mod, txq->ib); | |
3638 | txq->ib = NULL; | |
3639 | } | |
3640 | txq->tcb = NULL; | |
3641 | txq->tx = NULL; | |
3642 | list_add_tail(&txq->qe, &tx_mod->txq_free_q); | |
3643 | } | |
3644 | ||
3645 | list_for_each(qe, &tx_mod->tx_active_q) { | |
3646 | if (qe == &tx->qe) { | |
3647 | list_del(&tx->qe); | |
3648 | bfa_q_qe_init(&tx->qe); | |
3649 | break; | |
3650 | } | |
3651 | } | |
3652 | ||
3653 | tx->bna = NULL; | |
3654 | tx->priv = NULL; | |
3655 | list_add_tail(&tx->qe, &tx_mod->tx_free_q); | |
3656 | } | |
3657 | ||
3658 | static void | |
3659 | bna_tx_cb_txq_stopped(void *arg, int status) | |
3660 | { | |
3661 | struct bna_tx *tx = (struct bna_tx *)arg; | |
3662 | ||
3663 | bfa_q_qe_init(&tx->mbox_qe.qe); | |
3664 | bfa_wc_down(&tx->txq_stop_wc); | |
3665 | } | |
3666 | ||
3667 | static void | |
3668 | bna_tx_cb_txq_stopped_all(void *arg) | |
3669 | { | |
3670 | struct bna_tx *tx = (struct bna_tx *)arg; | |
3671 | ||
3672 | bfa_fsm_send_event(tx, TX_E_TXQ_STOPPED); | |
3673 | } | |
3674 | ||
3675 | static void | |
3676 | bna_tx_cb_stats_cleared(void *arg, int status) | |
3677 | { | |
3678 | struct bna_tx *tx = (struct bna_tx *)arg; | |
3679 | ||
3680 | bfa_q_qe_init(&tx->mbox_qe.qe); | |
3681 | ||
3682 | bfa_fsm_send_event(tx, TX_E_STAT_CLEARED); | |
3683 | } | |
3684 | ||
3685 | static void | |
3686 | bna_tx_start(struct bna_tx *tx) | |
3687 | { | |
3688 | tx->flags |= BNA_TX_F_PORT_STARTED; | |
3689 | if (tx->flags & BNA_TX_F_ENABLED) | |
3690 | bfa_fsm_send_event(tx, TX_E_START); | |
3691 | } | |
3692 | ||
3693 | static void | |
3694 | bna_tx_stop(struct bna_tx *tx) | |
3695 | { | |
3696 | tx->stop_cbfn = bna_tx_mod_cb_tx_stopped; | |
3697 | tx->stop_cbarg = &tx->bna->tx_mod; | |
3698 | ||
3699 | tx->flags &= ~BNA_TX_F_PORT_STARTED; | |
3700 | bfa_fsm_send_event(tx, TX_E_STOP); | |
3701 | } | |
3702 | ||
3703 | static void | |
3704 | bna_tx_fail(struct bna_tx *tx) | |
3705 | { | |
3706 | tx->flags &= ~BNA_TX_F_PORT_STARTED; | |
3707 | bfa_fsm_send_event(tx, TX_E_FAIL); | |
3708 | } | |
3709 | ||
b7ee31c5 | 3710 | static void |
8b230ed8 RM |
3711 | bna_tx_prio_changed(struct bna_tx *tx, int prio) |
3712 | { | |
3713 | struct bna_txq *txq; | |
3714 | struct list_head *qe; | |
3715 | ||
3716 | list_for_each(qe, &tx->txq_q) { | |
3717 | txq = (struct bna_txq *)qe; | |
3718 | txq->priority = prio; | |
3719 | } | |
3720 | ||
3721 | bfa_fsm_send_event(tx, TX_E_PRIO_CHANGE); | |
3722 | } | |
3723 | ||
3724 | static void | |
3725 | bna_tx_cee_link_status(struct bna_tx *tx, int cee_link) | |
3726 | { | |
3727 | if (cee_link) | |
3728 | tx->flags |= BNA_TX_F_PRIO_LOCK; | |
3729 | else | |
3730 | tx->flags &= ~BNA_TX_F_PRIO_LOCK; | |
3731 | } | |
3732 | ||
3733 | static void | |
3734 | bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx, | |
3735 | enum bna_cb_status status) | |
3736 | { | |
3737 | struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg; | |
3738 | ||
3739 | bfa_wc_down(&tx_mod->tx_stop_wc); | |
3740 | } | |
3741 | ||
3742 | static void | |
3743 | bna_tx_mod_cb_tx_stopped_all(void *arg) | |
3744 | { | |
3745 | struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg; | |
3746 | ||
3747 | if (tx_mod->stop_cbfn) | |
3748 | tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS); | |
3749 | tx_mod->stop_cbfn = NULL; | |
3750 | } | |
3751 | ||
3752 | void | |
3753 | bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info) | |
3754 | { | |
3755 | u32 q_size; | |
3756 | u32 page_count; | |
3757 | struct bna_mem_info *mem_info; | |
3758 | ||
3759 | res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM; | |
3760 | mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info; | |
3761 | mem_info->mem_type = BNA_MEM_T_KVA; | |
3762 | mem_info->len = sizeof(struct bna_tcb); | |
3763 | mem_info->num = num_txq; | |
3764 | ||
3765 | q_size = txq_depth * BFI_TXQ_WI_SIZE; | |
3766 | q_size = ALIGN(q_size, PAGE_SIZE); | |
3767 | page_count = q_size >> PAGE_SHIFT; | |
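/*
 * Worked example with hypothetical numbers: assuming a 64-byte work
 * item, txq_depth = 2048 gives q_size = 128 KiB, which with 4 KiB
 * pages yields page_count = 32 pages per TxQ.
 */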
3768 | ||
3769 | res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM; | |
3770 | mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info; | |
3771 | mem_info->mem_type = BNA_MEM_T_DMA; | |
3772 | mem_info->len = page_count * sizeof(struct bna_dma_addr); | |
3773 | mem_info->num = num_txq; | |
3774 | ||
3775 | res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM; | |
3776 | mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info; | |
3777 | mem_info->mem_type = BNA_MEM_T_KVA; | |
3778 | mem_info->len = page_count * sizeof(void *); | |
3779 | mem_info->num = num_txq; | |
3780 | ||
3781 | res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM; | |
3782 | mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info; | |
3783 | mem_info->mem_type = BNA_MEM_T_DMA; | |
3784 | mem_info->len = PAGE_SIZE; | |
3785 | mem_info->num = num_txq * page_count; | |
3786 | ||
3787 | res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR; | |
3788 | res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type = | |
3789 | BNA_INTR_T_MSIX; | |
3790 | res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq; | |
3791 | } | |
3792 | ||
3793 | struct bna_tx * | |
3794 | bna_tx_create(struct bna *bna, struct bnad *bnad, | |
3795 | struct bna_tx_config *tx_cfg, | |
3796 | struct bna_tx_event_cbfn *tx_cbfn, | |
3797 | struct bna_res_info *res_info, void *priv) | |
3798 | { | |
3799 | struct bna_intr_info *intr_info; | |
3800 | struct bna_tx_mod *tx_mod = &bna->tx_mod; | |
3801 | struct bna_tx *tx; | |
3802 | struct bna_txq *txq; | |
3803 | struct list_head *qe; | |
3804 | struct bna_ib_mod *ib_mod = &bna->ib_mod; | |
3805 | struct bna_doorbell_qset *qset; | |
3806 | struct bna_ib_config ib_config; | |
3807 | int page_count; | |
3808 | int page_size; | |
3809 | int page_idx; | |
3810 | int i; | |
3811 | unsigned long off; | |
3812 | ||
3813 | intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info; | |
3814 | page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.num) / | |
3815 | tx_cfg->num_txq; | |
3816 | page_size = res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len; | |
3817 | ||
3818 | /* | |
3819 | * Get resources | |
3820 | */ | |
3821 | ||
3822 | if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq)) | |
3823 | return NULL; | |
3824 | ||
3825 | /* Tx */ | |
3826 | ||
3827 | if (list_empty(&tx_mod->tx_free_q)) | |
3828 | return NULL; | |
3829 | bfa_q_deq(&tx_mod->tx_free_q, &tx); | |
3830 | bfa_q_qe_init(&tx->qe); | |
3831 | ||
3832 | /* TxQs */ | |
3833 | ||
3834 | INIT_LIST_HEAD(&tx->txq_q); | |
3835 | for (i = 0; i < tx_cfg->num_txq; i++) { | |
3836 | if (list_empty(&tx_mod->txq_free_q)) | |
3837 | goto err_return; | |
3838 | ||
3839 | bfa_q_deq(&tx_mod->txq_free_q, &txq); | |
3840 | bfa_q_qe_init(&txq->qe); | |
3841 | list_add_tail(&txq->qe, &tx->txq_q); | |
3842 | txq->ib = NULL; | |
3843 | txq->ib_seg_offset = -1; | |
3844 | txq->tx = tx; | |
3845 | } | |
3846 | ||
3847 | /* IBs */ | |
3848 | i = 0; | |
3849 | list_for_each(qe, &tx->txq_q) { | |
3850 | txq = (struct bna_txq *)qe; | |
3851 | ||
3852 | if (intr_info->num == 1) | |
3853 | txq->ib = bna_ib_get(ib_mod, intr_info->intr_type, | |
3854 | intr_info->idl[0].vector); | |
3855 | else | |
3856 | txq->ib = bna_ib_get(ib_mod, intr_info->intr_type, | |
3857 | intr_info->idl[i].vector); | |
3858 | ||
3859 | if (txq->ib == NULL) | |
3860 | goto err_return; | |
3861 | ||
3862 | txq->ib_seg_offset = bna_ib_reserve_idx(txq->ib); | |
3863 | if (txq->ib_seg_offset == -1) | |
3864 | goto err_return; | |
3865 | ||
3866 | i++; | |
3867 | } | |
3868 | ||
3869 | /* | |
3870 | * Initialize | |
3871 | */ | |
3872 | ||
3873 | /* Tx */ | |
3874 | ||
3875 | tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn; | |
3876 | tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn; | |
3877 | /* Following callbacks are mandatory */ | |
3878 | tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn; | |
3879 | tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn; | |
3880 | tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn; | |
3881 | ||
3882 | list_add_tail(&tx->qe, &tx_mod->tx_active_q); | |
3883 | tx->bna = bna; | |
3884 | tx->priv = priv; | |
3885 | tx->txq_stop_wc.wc_resume = bna_tx_cb_txq_stopped_all; | |
3886 | tx->txq_stop_wc.wc_cbarg = tx; | |
3887 | tx->txq_stop_wc.wc_count = 0; | |
3888 | ||
3889 | tx->type = tx_cfg->tx_type; | |
3890 | ||
3891 | tx->flags = 0; | |
3892 | if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_STARTED) { | |
3893 | switch (tx->type) { | |
3894 | case BNA_TX_T_REGULAR: | |
3895 | if (!(tx->bna->tx_mod.flags & | |
3896 | BNA_TX_MOD_F_PORT_LOOPBACK)) | |
3897 | tx->flags |= BNA_TX_F_PORT_STARTED; | |
3898 | break; | |
3899 | case BNA_TX_T_LOOPBACK: | |
3900 | if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_LOOPBACK) | |
3901 | tx->flags |= BNA_TX_F_PORT_STARTED; | |
3902 | break; | |
3903 | } | |
3904 | } | |
3905 | if (tx->bna->tx_mod.cee_link) | |
3906 | tx->flags |= BNA_TX_F_PRIO_LOCK; | |
3907 | ||
3908 | /* TxQ */ | |
3909 | ||
3910 | i = 0; | |
3911 | page_idx = 0; | |
3912 | list_for_each(qe, &tx->txq_q) { | |
3913 | txq = (struct bna_txq *)qe; | |
3914 | txq->priority = tx_mod->priority; | |
3915 | txq->tcb = (struct bna_tcb *) | |
3916 | res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva; | |
3917 | txq->tx_packets = 0; | |
3918 | txq->tx_bytes = 0; | |
3919 | ||
3920 | /* IB */ | |
3921 | ||
3922 | ib_config.coalescing_timeo = BFI_TX_COALESCING_TIMEO; | |
3923 | ib_config.interpkt_timeo = 0; /* Not used */ | |
3924 | ib_config.interpkt_count = BFI_TX_INTERPKT_COUNT; | |
3925 | ib_config.ctrl_flags = (BFI_IB_CF_INTER_PKT_DMA | | |
3926 | BFI_IB_CF_INT_ENABLE | | |
3927 | BFI_IB_CF_COALESCING_MODE); | |
3928 | bna_ib_config(txq->ib, &ib_config); | |
3929 | ||
3930 | /* TCB */ | |
3931 | ||
3932 | txq->tcb->producer_index = 0; | |
3933 | txq->tcb->consumer_index = 0; | |
3934 | txq->tcb->hw_consumer_index = (volatile u32 *) | |
3935 | ((volatile u8 *)txq->ib->ib_seg_host_addr_kva + | |
3936 | (txq->ib_seg_offset * BFI_IBIDX_SIZE)); | |
3937 | *(txq->tcb->hw_consumer_index) = 0; | |
3938 | txq->tcb->q_depth = tx_cfg->txq_depth; | |
3939 | txq->tcb->unmap_q = (void *) | |
3940 | res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva; | |
3941 | qset = (struct bna_doorbell_qset *)0; | |
3942 | off = (unsigned long)&qset[txq->txq_id].txq[0]; | |
3943 | txq->tcb->q_dbell = off + | |
3944 | BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva); | |
3945 | txq->tcb->i_dbell = &txq->ib->door_bell; | |
3946 | txq->tcb->intr_type = intr_info->intr_type; | |
3947 | txq->tcb->intr_vector = (intr_info->num == 1) ? | |
3948 | intr_info->idl[0].vector : | |
3949 | intr_info->idl[i].vector; | |
3950 | txq->tcb->txq = txq; | |
3951 | txq->tcb->bnad = bnad; | |
3952 | txq->tcb->id = i; | |
3953 | ||
3954 | /* QPT, SWQPT, Pages */ | |
3955 | bna_txq_qpt_setup(txq, page_count, page_size, | |
3956 | &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i], | |
3957 | &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i], | |
3958 | &res_info[BNA_TX_RES_MEM_T_PAGE]. | |
3959 | res_u.mem_info.mdl[page_idx]); | |
3960 | txq->tcb->page_idx = page_idx; | |
3961 | txq->tcb->page_count = page_count; | |
3962 | page_idx += page_count; | |
3963 | ||
3964 | /* Callback to bnad for setting up TCB */ | |
3965 | if (tx->tcb_setup_cbfn) | |
3966 | (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb); | |
3967 | ||
3968 | i++; | |
3969 | } | |
3970 | ||
3971 | /* TxF */ | |
3972 | ||
3973 | tx->txf.ctrl_flags = BFI_TXF_CF_ENABLE | BFI_TXF_CF_VLAN_WI_BASED; | |
3974 | tx->txf.vlan = 0; | |
3975 | ||
3976 | /* Mbox element */ | |
3977 | bfa_q_qe_init(&tx->mbox_qe.qe); | |
3978 | ||
3979 | bfa_fsm_set_state(tx, bna_tx_sm_stopped); | |
3980 | ||
3981 | return tx; | |
3982 | ||
3983 | err_return: | |
3984 | bna_tx_free(tx); | |
3985 | return NULL; | |
3986 | } | |
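
/*
 * Typical life cycle, as a sketch (the bnad layer drives this; the
 * bnad-side callback name is illustrative):
 *
 *   bna_tx_res_req(num_txq, txq_depth, res_info);
 *   ...allocate each res_info[] entry...
 *   tx = bna_tx_create(bna, bnad, &tx_cfg, &tx_cbfn, res_info, priv);
 *   bna_tx_enable(tx);
 *   ...
 *   bna_tx_disable(tx, BNA_HARD_CLEANUP, stop_done_cbfn);
 *   bna_tx_destroy(tx);
 *
 * BNA_HARD_CLEANUP here is assumed to be the counterpart of the
 * BNA_SOFT_CLEANUP fast path visible in bna_tx_disable() below.
 */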
3987 | ||
3988 | void | |
3989 | bna_tx_destroy(struct bna_tx *tx) | |
3990 | { | |
3991 | /* Callback to bnad for destroying TCB */ | |
3992 | if (tx->tcb_destroy_cbfn) { | |
3993 | struct bna_txq *txq; | |
3994 | struct list_head *qe; | |
3995 | ||
3996 | list_for_each(qe, &tx->txq_q) { | |
3997 | txq = (struct bna_txq *)qe; | |
3998 | (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb); | |
3999 | } | |
4000 | } | |
4001 | ||
4002 | bna_tx_free(tx); | |
4003 | } | |
4004 | ||
4005 | void | |
4006 | bna_tx_enable(struct bna_tx *tx) | |
4007 | { | |
4008 | if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped) | |
4009 | return; | |
4010 | ||
4011 | tx->flags |= BNA_TX_F_ENABLED; | |
4012 | ||
4013 | if (tx->flags & BNA_TX_F_PORT_STARTED) | |
4014 | bfa_fsm_send_event(tx, TX_E_START); | |
4015 | } | |
4016 | ||
4017 | void | |
4018 | bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type, | |
4019 | void (*cbfn)(void *, struct bna_tx *, enum bna_cb_status)) | |
4020 | { | |
4021 | if (type == BNA_SOFT_CLEANUP) { | |
4022 | (*cbfn)(tx->bna->bnad, tx, BNA_CB_SUCCESS); | |
4023 | return; | |
4024 | } | |
4025 | ||
4026 | tx->stop_cbfn = cbfn; | |
4027 | tx->stop_cbarg = tx->bna->bnad; | |
4028 | ||
4029 | tx->flags &= ~BNA_TX_F_ENABLED; | |
4030 | ||
4031 | bfa_fsm_send_event(tx, TX_E_STOP); | |
4032 | } | |
4033 | ||
4034 | int | |
4035 | bna_tx_state_get(struct bna_tx *tx) | |
4036 | { | |
4037 | return bfa_sm_to_state(tx_sm_table, tx->fsm); | |
4038 | } | |
4039 | ||
4040 | void | |
4041 | bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna, | |
4042 | struct bna_res_info *res_info) | |
4043 | { | |
4044 | int i; | |
4045 | ||
4046 | tx_mod->bna = bna; | |
4047 | tx_mod->flags = 0; | |
4048 | ||
4049 | tx_mod->tx = (struct bna_tx *) | |
4050 | res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva; | |
4051 | tx_mod->txq = (struct bna_txq *) | |
4052 | res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva; | |
4053 | ||
4054 | INIT_LIST_HEAD(&tx_mod->tx_free_q); | |
4055 | INIT_LIST_HEAD(&tx_mod->tx_active_q); | |
4056 | ||
4057 | INIT_LIST_HEAD(&tx_mod->txq_free_q); | |
4058 | ||
4059 | for (i = 0; i < BFI_MAX_TXQ; i++) { | |
4060 | tx_mod->tx[i].txf.txf_id = i; | |
4061 | bfa_q_qe_init(&tx_mod->tx[i].qe); | |
4062 | list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q); | |
4063 | ||
4064 | tx_mod->txq[i].txq_id = i; | |
4065 | bfa_q_qe_init(&tx_mod->txq[i].qe); | |
4066 | list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q); | |
4067 | } | |
4068 | ||
4069 | tx_mod->tx_stop_wc.wc_resume = bna_tx_mod_cb_tx_stopped_all; | |
4070 | tx_mod->tx_stop_wc.wc_cbarg = tx_mod; | |
4071 | tx_mod->tx_stop_wc.wc_count = 0; | |
4072 | } | |
4073 | ||
4074 | void | |
4075 | bna_tx_mod_uninit(struct bna_tx_mod *tx_mod) | |
4076 | { | |
4077 | struct list_head *qe; | |
4078 | int i; | |
4079 | ||
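/*
 * Walk the free lists purely to count entries; the totals are
 * deliberately unused here, presumably leftovers from debug-time
 * sanity checks that everything was returned to the free queues.
 */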
4080 | i = 0; | |
4081 | list_for_each(qe, &tx_mod->tx_free_q) | |
4082 | i++; | |
4083 | ||
4084 | i = 0; | |
4085 | list_for_each(qe, &tx_mod->txq_free_q) | |
4086 | i++; | |
4087 | ||
4088 | tx_mod->bna = NULL; | |
4089 | } | |
4090 | ||
4091 | void | |
4092 | bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type) | |
4093 | { | |
4094 | struct bna_tx *tx; | |
4095 | struct list_head *qe; | |
4096 | ||
4097 | tx_mod->flags |= BNA_TX_MOD_F_PORT_STARTED; | |
4098 | if (type == BNA_TX_T_LOOPBACK) | |
4099 | tx_mod->flags |= BNA_TX_MOD_F_PORT_LOOPBACK; | |
4100 | ||
4101 | list_for_each(qe, &tx_mod->tx_active_q) { | |
4102 | tx = (struct bna_tx *)qe; | |
4103 | if (tx->type == type) | |
4104 | bna_tx_start(tx); | |
4105 | } | |
4106 | } | |
4107 | ||
4108 | void | |
4109 | bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type) | |
4110 | { | |
4111 | struct bna_tx *tx; | |
4112 | struct list_head *qe; | |
4113 | ||
4114 | tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED; | |
4115 | tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK; | |
4116 | ||
4117 | tx_mod->stop_cbfn = bna_port_cb_tx_stopped; | |
4118 | ||
4119 | /* | |
4120 | * Before calling bna_tx_stop(), take one tx_stop_wc reference per | |
4121 | * bna_tx_stop() call we are about to make. | |
4122 | */ | |
4123 | list_for_each(qe, &tx_mod->tx_active_q) { | |
4124 | tx = (struct bna_tx *)qe; | |
4125 | if (tx->type == type) | |
4126 | bfa_wc_up(&tx_mod->tx_stop_wc); | |
4127 | } | |
4128 | ||
4129 | if (tx_mod->tx_stop_wc.wc_count == 0) { | |
4130 | tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS); | |
4131 | tx_mod->stop_cbfn = NULL; | |
4132 | return; | |
4133 | } | |
4134 | ||
4135 | list_for_each(qe, &tx_mod->tx_active_q) { | |
4136 | tx = (struct bna_tx *)qe; | |
4137 | if (tx->type == type) | |
4138 | bna_tx_stop(tx); | |
4139 | } | |
4140 | } | |
4141 | ||
4142 | void | |
4143 | bna_tx_mod_fail(struct bna_tx_mod *tx_mod) | |
4144 | { | |
4145 | struct bna_tx *tx; | |
4146 | struct list_head *qe; | |
4147 | ||
4148 | tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED; | |
4149 | tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK; | |
4150 | ||
4151 | list_for_each(qe, &tx_mod->tx_active_q) { | |
4152 | tx = (struct bna_tx *)qe; | |
4153 | bna_tx_fail(tx); | |
4154 | } | |
4155 | } | |
4156 | ||
4157 | void | |
4158 | bna_tx_mod_prio_changed(struct bna_tx_mod *tx_mod, int prio) | |
4159 | { | |
4160 | struct bna_tx *tx; | |
4161 | struct list_head *qe; | |
4162 | ||
4163 | if (prio != tx_mod->priority) { | |
4164 | tx_mod->priority = prio; | |
4165 | ||
4166 | list_for_each(qe, &tx_mod->tx_active_q) { | |
4167 | tx = (struct bna_tx *)qe; | |
4168 | bna_tx_prio_changed(tx, prio); | |
4169 | } | |
4170 | } | |
4171 | } | |
4172 | ||
4173 | void | |
4174 | bna_tx_mod_cee_link_status(struct bna_tx_mod *tx_mod, int cee_link) | |
4175 | { | |
4176 | struct bna_tx *tx; | |
4177 | struct list_head *qe; | |
4178 | ||
4179 | tx_mod->cee_link = cee_link; | |
4180 | ||
4181 | list_for_each(qe, &tx_mod->tx_active_q) { | |
4182 | tx = (struct bna_tx *)qe; | |
4183 | bna_tx_cee_link_status(tx, cee_link); | |
4184 | } | |
4185 | } |