bna: make function tables cont
drivers/net/ethernet/brocade/bna/bna.h
/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#ifndef __BNA_H__
#define __BNA_H__

#include "bfa_defs.h"
#include "bfa_ioc.h"
#include "bfi_enet.h"
#include "bna_types.h"

extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];

/**
 *
 * Macros and constants
 *
 */

#define BNA_IOC_TIMER_FREQ 200

/* Log string size */
#define BNA_MESSAGE_SIZE 256

#define bna_is_small_rxq(_id) ((_id) & 0x1)

#define BNA_MAC_IS_EQUAL(_mac1, _mac2) \
        (!memcmp((_mac1), (_mac2), sizeof(mac_t)))

#define BNA_POWER_OF_2(x) (((x) & ((x) - 1)) == 0)

#define BNA_TO_POWER_OF_2(x) \
do { \
        int _shift = 0; \
        while ((x) && (x) != 1) { \
                (x) >>= 1; \
                _shift++; \
        } \
        (x) <<= _shift; \
} while (0)

#define BNA_TO_POWER_OF_2_HIGH(x) \
do { \
        int n = 1; \
        while (n < (x)) \
                n <<= 1; \
        (x) = n; \
} while (0)
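
/*
 * Worked example (bna_example_round_pow2 is an illustrative sketch, not part
 * of the driver API): BNA_TO_POWER_OF_2() rounds its argument down to the
 * nearest power of two, while BNA_TO_POWER_OF_2_HIGH() rounds it up, so 12
 * becomes 8 and 16 respectively.
 */
static inline void bna_example_round_pow2(void)
{
        int down = 12, up = 12;

        BNA_TO_POWER_OF_2(down);        /* 12 -> 8  */
        BNA_TO_POWER_OF_2_HIGH(up);     /* 12 -> 16 */
}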

/*
 * input :  _addr-> os dma addr in host endian format,
 * output : _bna_dma_addr-> pointer to hw dma addr
 */
#define BNA_SET_DMA_ADDR(_addr, _bna_dma_addr) \
do { \
        u64 tmp_addr = \
                cpu_to_be64((u64)(_addr)); \
        (_bna_dma_addr)->msb = ((struct bna_dma_addr *)&tmp_addr)->msb; \
        (_bna_dma_addr)->lsb = ((struct bna_dma_addr *)&tmp_addr)->lsb; \
} while (0)

/*
 * input :  _bna_dma_addr-> pointer to hw dma addr
 * output : _addr-> os dma addr in host endian format
 */
#define BNA_GET_DMA_ADDR(_bna_dma_addr, _addr) \
do { \
        (_addr) = ((((u64)ntohl((_bna_dma_addr)->msb))) << 32) \
                | ((ntohl((_bna_dma_addr)->lsb) & 0xffffffff)); \
} while (0)
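
/*
 * Usage sketch (bna_example_dma_roundtrip is illustrative only; it assumes
 * struct bna_dma_addr exposes 32-bit msb/lsb halves, as consumed by the
 * macros above): converting a host DMA address into the big-endian hardware
 * layout and back yields the original value.
 */
static inline u64 bna_example_dma_roundtrip(u64 host_addr)
{
        struct bna_dma_addr hw_addr;
        u64 back;

        BNA_SET_DMA_ADDR(host_addr, &hw_addr);  /* host order -> hw (big endian halves) */
        BNA_GET_DMA_ADDR(&hw_addr, back);       /* hw -> host order */
        return back;                            /* equals host_addr */
}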

#define containing_rec(addr, type, field) \
        ((type *)((unsigned char *)(addr) - \
                  (unsigned char *)(&((type *)0)->field)))

#define BNA_TXQ_WI_NEEDED(_vectors) (((_vectors) + 3) >> 2)

/* TxQ element is 64 bytes */
#define BNA_TXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 6)
#define BNA_TXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 6)

#define BNA_TXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
{ \
        unsigned int page_index;        /* index within a page */ \
        void *page_addr; \
        page_index = (_qe_idx) & (BNA_TXQ_PAGE_INDEX_MAX - 1); \
        (_qe_ptr_range) = (BNA_TXQ_PAGE_INDEX_MAX - page_index); \
        page_addr = (_qpt_ptr)[((_qe_idx) >> BNA_TXQ_PAGE_INDEX_MAX_SHIFT)];\
        (_qe_ptr) = &((struct bna_txq_entry *)(page_addr))[page_index]; \
}

/* RxQ element is 8 bytes */
#define BNA_RXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 3)
#define BNA_RXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 3)

#define BNA_RXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
{ \
        unsigned int page_index;        /* index within a page */ \
        void *page_addr; \
        page_index = (_qe_idx) & (BNA_RXQ_PAGE_INDEX_MAX - 1); \
        (_qe_ptr_range) = (BNA_RXQ_PAGE_INDEX_MAX - page_index); \
        page_addr = (_qpt_ptr)[((_qe_idx) >> \
                                BNA_RXQ_PAGE_INDEX_MAX_SHIFT)]; \
        (_qe_ptr) = &((struct bna_rxq_entry *)(page_addr))[page_index]; \
}

/* CQ element is 16 bytes */
#define BNA_CQ_PAGE_INDEX_MAX (PAGE_SIZE >> 4)
#define BNA_CQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 4)

#define BNA_CQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
{ \
        unsigned int page_index;        /* index within a page */ \
        void *page_addr; \
        \
        page_index = (_qe_idx) & (BNA_CQ_PAGE_INDEX_MAX - 1); \
        (_qe_ptr_range) = (BNA_CQ_PAGE_INDEX_MAX - page_index); \
        page_addr = (_qpt_ptr)[((_qe_idx) >> \
                                BNA_CQ_PAGE_INDEX_MAX_SHIFT)]; \
        (_qe_ptr) = &((struct bna_cq_entry *)(page_addr))[page_index];\
}
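
/*
 * Usage sketch (bna_example_txq_entry is illustrative only; it assumes the
 * queue page table is an array of per-page kernel virtual addresses, which
 * is what the *_QPGE_PTR_GET() macros above index into): resolve TxQ element
 * qe_idx to a pointer and report how many consecutive elements remain on the
 * same page.
 */
static inline struct bna_txq_entry *
bna_example_txq_entry(void **qpt_kva, u32 qe_idx, u32 *range)
{
        struct bna_txq_entry *qe;

        BNA_TXQ_QPGE_PTR_GET(qe_idx, qpt_kva, qe, *range);
        return qe;
}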

#define BNA_QE_INDX_2_PTR(_cast, _qe_idx, _q_base) \
        (&((_cast *)(_q_base))[(_qe_idx)])

#define BNA_QE_INDX_RANGE(_qe_idx, _q_depth) ((_q_depth) - (_qe_idx))

#define BNA_QE_INDX_ADD(_qe_idx, _qe_num, _q_depth) \
        ((_qe_idx) = ((_qe_idx) + (_qe_num)) & ((_q_depth) - 1))

#define BNA_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth) \
        (((_updated_idx) - (_old_idx)) & ((_q_depth) - 1))

#define BNA_QE_FREE_CNT(_q_ptr, _q_depth) \
        (((_q_ptr)->consumer_index - (_q_ptr)->producer_index - 1) & \
         ((_q_depth) - 1))

#define BNA_QE_IN_USE_CNT(_q_ptr, _q_depth) \
        ((((_q_ptr)->producer_index - (_q_ptr)->consumer_index)) & \
         (_q_depth - 1))

#define BNA_Q_GET_CI(_q_ptr) ((_q_ptr)->q.consumer_index)

#define BNA_Q_GET_PI(_q_ptr) ((_q_ptr)->q.producer_index)

#define BNA_Q_PI_ADD(_q_ptr, _num) \
        (_q_ptr)->q.producer_index = \
                (((_q_ptr)->q.producer_index + (_num)) & \
                ((_q_ptr)->q.q_depth - 1))

#define BNA_Q_CI_ADD(_q_ptr, _num) \
        (_q_ptr)->q.consumer_index = \
                (((_q_ptr)->q.consumer_index + (_num)) \
                & ((_q_ptr)->q.q_depth - 1))

#define BNA_Q_FREE_COUNT(_q_ptr) \
        (BNA_QE_FREE_CNT(&((_q_ptr)->q), (_q_ptr)->q.q_depth))

#define BNA_Q_IN_USE_COUNT(_q_ptr) \
        (BNA_QE_IN_USE_CNT(&(_q_ptr)->q, (_q_ptr)->q.q_depth))
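
/*
 * Worked example (struct bna_example_ring and the function below are
 * hypothetical, used only to exercise the index macros; real queues keep
 * these indices in their control blocks): with a power-of-two depth of 64,
 * producer_index 10 and consumer_index 4, six elements are in use and 57
 * are free; one slot is always kept empty so a full ring can be told apart
 * from an empty one.
 */
struct bna_example_ring {
        u32 producer_index;
        u32 consumer_index;
};

static inline void bna_example_ring_counts(void)
{
        struct bna_example_ring ring = {
                .producer_index = 10,
                .consumer_index = 4,
        };
        u32 in_use = BNA_QE_IN_USE_CNT(&ring, 64);      /* (10 - 4) & 63 == 6 */
        u32 free_cnt = BNA_QE_FREE_CNT(&ring, 64);      /* (4 - 10 - 1) & 63 == 57 */

        BNA_QE_INDX_ADD(ring.producer_index, free_cnt, 64); /* wraps modulo depth */
        (void)in_use;
}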

#define BNA_LARGE_PKT_SIZE 1000

#define BNA_UPDATE_PKT_CNT(_pkt, _len) \
do { \
        if ((_len) > BNA_LARGE_PKT_SIZE) { \
                (_pkt)->large_pkt_cnt++; \
        } else { \
                (_pkt)->small_pkt_cnt++; \
        } \
} while (0)

#define call_rxf_stop_cbfn(rxf) \
do { \
        if ((rxf)->stop_cbfn) { \
                void (*cbfn)(struct bna_rx *); \
                struct bna_rx *cbarg; \
                cbfn = (rxf)->stop_cbfn; \
                cbarg = (rxf)->stop_cbarg; \
                (rxf)->stop_cbfn = NULL; \
                (rxf)->stop_cbarg = NULL; \
                cbfn(cbarg); \
        } \
} while (0)

#define call_rxf_start_cbfn(rxf) \
do { \
        if ((rxf)->start_cbfn) { \
                void (*cbfn)(struct bna_rx *); \
                struct bna_rx *cbarg; \
                cbfn = (rxf)->start_cbfn; \
                cbarg = (rxf)->start_cbarg; \
                (rxf)->start_cbfn = NULL; \
                (rxf)->start_cbarg = NULL; \
                cbfn(cbarg); \
        } \
} while (0)

#define call_rxf_cam_fltr_cbfn(rxf) \
do { \
        if ((rxf)->cam_fltr_cbfn) { \
                void (*cbfn)(struct bnad *, struct bna_rx *); \
                struct bnad *cbarg; \
                cbfn = (rxf)->cam_fltr_cbfn; \
                cbarg = (rxf)->cam_fltr_cbarg; \
                (rxf)->cam_fltr_cbfn = NULL; \
                (rxf)->cam_fltr_cbarg = NULL; \
                cbfn(cbarg, rxf->rx); \
        } \
} while (0)

#define call_rxf_pause_cbfn(rxf) \
do { \
        if ((rxf)->oper_state_cbfn) { \
                void (*cbfn)(struct bnad *, struct bna_rx *); \
                struct bnad *cbarg; \
                cbfn = (rxf)->oper_state_cbfn; \
                cbarg = (rxf)->oper_state_cbarg; \
                (rxf)->oper_state_cbfn = NULL; \
                (rxf)->oper_state_cbarg = NULL; \
                cbfn(cbarg, rxf->rx); \
        } \
} while (0)

#define call_rxf_resume_cbfn(rxf) call_rxf_pause_cbfn(rxf)
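
/*
 * Sketch of the one-shot callback pattern used by the call_rxf_*_cbfn()
 * macros (bna_example_rxf_stop_done and its "done" argument are
 * hypothetical): the stored function pointer is cleared before it is
 * invoked, so an armed completion fires at most once.
 */
static inline void bna_example_rxf_stop_done(struct bna_rxf *rxf,
                                             void (*done)(struct bna_rx *))
{
        rxf->stop_cbfn = done;          /* armed when the stop was requested */
        rxf->stop_cbarg = rxf->rx;
        call_rxf_stop_cbfn(rxf);        /* invokes done(rxf->rx), then disarms */
}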

#define is_xxx_enable(mode, bitmask, xxx) ((bitmask & xxx) && (mode & xxx))

#define is_xxx_disable(mode, bitmask, xxx) ((bitmask & xxx) && !(mode & xxx))

#define xxx_enable(mode, bitmask, xxx) \
do { \
        bitmask |= xxx; \
        mode |= xxx; \
} while (0)

#define xxx_disable(mode, bitmask, xxx) \
do { \
        bitmask |= xxx; \
        mode &= ~xxx; \
} while (0)

#define xxx_inactive(mode, bitmask, xxx) \
do { \
        bitmask &= ~xxx; \
        mode &= ~xxx; \
} while (0)

#define is_promisc_enable(mode, bitmask) \
        is_xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)

#define is_promisc_disable(mode, bitmask) \
        is_xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)

#define promisc_enable(mode, bitmask) \
        xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)

#define promisc_disable(mode, bitmask) \
        xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)

#define promisc_inactive(mode, bitmask) \
        xxx_inactive(mode, bitmask, BNA_RXMODE_PROMISC)

#define is_default_enable(mode, bitmask) \
        is_xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define is_default_disable(mode, bitmask) \
        is_xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define default_enable(mode, bitmask) \
        xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define default_disable(mode, bitmask) \
        xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define default_inactive(mode, bitmask) \
        xxx_inactive(mode, bitmask, BNA_RXMODE_DEFAULT)

#define is_allmulti_enable(mode, bitmask) \
        is_xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define is_allmulti_disable(mode, bitmask) \
        is_xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define allmulti_enable(mode, bitmask) \
        xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define allmulti_disable(mode, bitmask) \
        xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define allmulti_inactive(mode, bitmask) \
        xxx_inactive(mode, bitmask, BNA_RXMODE_ALLMULTI)
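
/*
 * Worked example (bna_example_promisc_flow is illustrative only): as the
 * macros above show, the bitmask flags a mode bit as significant and "mode"
 * carries its value, so is_promisc_enable() holds only while the bit is set
 * in both, and xxx_inactive() withdraws it from both.
 */
static inline int bna_example_promisc_flow(void)
{
        int mode = 0, bitmask = 0;

        promisc_enable(mode, bitmask);          /* flag + set PROMISC */
        if (!is_promisc_enable(mode, bitmask))
                return 0;
        promisc_inactive(mode, bitmask);        /* clear both again */
        return !is_promisc_enable(mode, bitmask);       /* 1: no longer enabled */
}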

#define GET_RXQS(rxp, q0, q1) do { \
        switch ((rxp)->type) { \
        case BNA_RXP_SINGLE: \
                (q0) = rxp->rxq.single.only; \
                (q1) = NULL; \
                break; \
        case BNA_RXP_SLR: \
                (q0) = rxp->rxq.slr.large; \
                (q1) = rxp->rxq.slr.small; \
                break; \
        case BNA_RXP_HDS: \
                (q0) = rxp->rxq.hds.data; \
                (q1) = rxp->rxq.hds.hdr; \
                break; \
        } \
} while (0)
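
/*
 * Usage sketch (bna_example_data_rxq is illustrative only): GET_RXQS() hands
 * back the data/large queue in q0 and the header/small queue in q1; q1 is
 * NULL for a BNA_RXP_SINGLE path.
 */
static inline struct bna_rxq *bna_example_data_rxq(struct bna_rxp *rxp)
{
        struct bna_rxq *q0 = NULL, *q1 = NULL;

        GET_RXQS(rxp, q0, q1);
        (void)q1;       /* header/small queue unused in this sketch */
        return q0;
}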

#define bna_tx_rid_mask(_bna) ((_bna)->tx_mod.rid_mask)

#define bna_rx_rid_mask(_bna) ((_bna)->rx_mod.rid_mask)

#define bna_tx_from_rid(_bna, _rid, _tx) \
do { \
        struct bna_tx_mod *__tx_mod = &(_bna)->tx_mod; \
        struct bna_tx *__tx; \
        struct list_head *qe; \
        _tx = NULL; \
        list_for_each(qe, &__tx_mod->tx_active_q) { \
                __tx = (struct bna_tx *)qe; \
                if (__tx->rid == (_rid)) { \
                        (_tx) = __tx; \
                        break; \
                } \
        } \
} while (0)

#define bna_rx_from_rid(_bna, _rid, _rx) \
do { \
        struct bna_rx_mod *__rx_mod = &(_bna)->rx_mod; \
        struct bna_rx *__rx; \
        struct list_head *qe; \
        _rx = NULL; \
        list_for_each(qe, &__rx_mod->rx_active_q) { \
                __rx = (struct bna_rx *)qe; \
                if (__rx->rid == (_rid)) { \
                        (_rx) = __rx; \
                        break; \
                } \
        } \
} while (0)
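
/*
 * Usage sketch (bna_example_rx_lookup is illustrative only): map a resource
 * id back to the active Rx object that owns it by walking the active queue,
 * as the macro above does.
 */
static inline struct bna_rx *bna_example_rx_lookup(struct bna *bna, int rid)
{
        struct bna_rx *rx;

        bna_rx_from_rid(bna, rid, rx);  /* rx is NULL if no active Rx matches */
        return rx;
}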

/**
 *
 * Inline functions
 *
 */

static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr)
{
        struct bna_mac *mac = NULL;
        struct list_head *qe;
        list_for_each(qe, q) {
                if (BNA_MAC_IS_EQUAL(((struct bna_mac *)qe)->addr, addr)) {
                        mac = (struct bna_mac *)qe;
                        break;
                }
        }
        return mac;
}

#define bna_attr(_bna) (&(_bna)->ioceth.attr)

/**
 *
 * Function prototypes
 *
 */

/**
 * BNA
 */

/* FW response handlers */
void bna_bfi_stats_clr_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr);

/* APIs for BNAD */
void bna_res_req(struct bna_res_info *res_info);
void bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info);
void bna_init(struct bna *bna, struct bnad *bnad,
              struct bfa_pcidev *pcidev,
              struct bna_res_info *res_info);
void bna_mod_init(struct bna *bna, struct bna_res_info *res_info);
void bna_uninit(struct bna *bna);
int bna_num_txq_set(struct bna *bna, int num_txq);
int bna_num_rxp_set(struct bna *bna, int num_rxp);
void bna_hw_stats_get(struct bna *bna);

/* APIs for RxF */
struct bna_mac *bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod);
void bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod,
                          struct bna_mac *mac);
struct bna_mac *bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod);
void bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod,
                          struct bna_mac *mac);
struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod);
void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
                             struct bna_mcam_handle *handle);

/**
 * MBOX
 */

/* API for BNAD */
void bna_mbox_handler(struct bna *bna, u32 intr_status);

/**
 * ETHPORT
 */

/* Callbacks for RX */
void bna_ethport_cb_rx_started(struct bna_ethport *ethport);
void bna_ethport_cb_rx_stopped(struct bna_ethport *ethport);

/**
 * TX MODULE AND TX
 */
/* FW response handlers */
void bna_bfi_tx_enet_start_rsp(struct bna_tx *tx,
                               struct bfi_msgq_mhdr *msghdr);
void bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx,
                              struct bfi_msgq_mhdr *msghdr);
void bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod);

/* APIs for BNA */
void bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
                     struct bna_res_info *res_info);
void bna_tx_mod_uninit(struct bna_tx_mod *tx_mod);

/* APIs for ENET */
void bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
void bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
void bna_tx_mod_fail(struct bna_tx_mod *tx_mod);

/* APIs for BNAD */
void bna_tx_res_req(int num_txq, int txq_depth,
                    struct bna_res_info *res_info);
struct bna_tx *bna_tx_create(struct bna *bna, struct bnad *bnad,
                             struct bna_tx_config *tx_cfg,
                             const struct bna_tx_event_cbfn *tx_cbfn,
                             struct bna_res_info *res_info, void *priv);
void bna_tx_destroy(struct bna_tx *tx);
void bna_tx_enable(struct bna_tx *tx);
void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
                    void (*cbfn)(void *, struct bna_tx *));
void bna_tx_cleanup_complete(struct bna_tx *tx);
void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo);

/**
 * RX MODULE, RX, RXF
 */

/* FW response handlers */
void bna_bfi_rx_enet_start_rsp(struct bna_rx *rx,
                               struct bfi_msgq_mhdr *msghdr);
void bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx,
                              struct bfi_msgq_mhdr *msghdr);
void bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr);
void bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
                               struct bfi_msgq_mhdr *msghdr);

/* APIs for BNA */
void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
                     struct bna_res_info *res_info);
void bna_rx_mod_uninit(struct bna_rx_mod *rx_mod);

/* APIs for ENET */
void bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
void bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
void bna_rx_mod_fail(struct bna_rx_mod *rx_mod);

/* APIs for BNAD */
void bna_rx_res_req(struct bna_rx_config *rx_config,
                    struct bna_res_info *res_info);
struct bna_rx *bna_rx_create(struct bna *bna, struct bnad *bnad,
                             struct bna_rx_config *rx_cfg,
                             const struct bna_rx_event_cbfn *rx_cbfn,
                             struct bna_res_info *res_info, void *priv);
void bna_rx_destroy(struct bna_rx *rx);
void bna_rx_enable(struct bna_rx *rx);
void bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
                    void (*cbfn)(void *, struct bna_rx *));
void bna_rx_cleanup_complete(struct bna_rx *rx);
void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo);
void bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]);
void bna_rx_dim_update(struct bna_ccb *ccb);
enum bna_cb_status
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
                 void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_ucast_add(struct bna_rx *rx, u8 *ucmac,
                 void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac,
                 void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
                 void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
                     void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
                enum bna_rxmode bitmask,
                void (*cbfn)(struct bnad *, struct bna_rx *));
void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
void bna_rx_vlanfilter_enable(struct bna_rx *rx);
/**
 * ENET
 */

/* API for RX */
int bna_enet_mtu_get(struct bna_enet *enet);

/* Callbacks for TX, RX */
void bna_enet_cb_tx_stopped(struct bna_enet *enet);
void bna_enet_cb_rx_stopped(struct bna_enet *enet);

/* API for BNAD */
void bna_enet_enable(struct bna_enet *enet);
void bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
                      void (*cbfn)(void *));
void bna_enet_pause_config(struct bna_enet *enet,
                           struct bna_pause_config *pause_config,
                           void (*cbfn)(struct bnad *));
void bna_enet_mtu_set(struct bna_enet *enet, int mtu,
                      void (*cbfn)(struct bnad *));
void bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac);

/**
 * IOCETH
 */

/* APIs for BNAD */
void bna_ioceth_enable(struct bna_ioceth *ioceth);
void bna_ioceth_disable(struct bna_ioceth *ioceth,
                        enum bna_cleanup_type type);

/**
 * BNAD
 */

/* Callbacks for ENET */
void bnad_cb_ethport_link_status(struct bnad *bnad,
                                 enum bna_link_status status);

/* Callbacks for IOCETH */
void bnad_cb_ioceth_ready(struct bnad *bnad);
void bnad_cb_ioceth_failed(struct bnad *bnad);
void bnad_cb_ioceth_disabled(struct bnad *bnad);
void bnad_cb_mbox_intr_enable(struct bnad *bnad);
void bnad_cb_mbox_intr_disable(struct bnad *bnad);

/* Callbacks for BNA */
void bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
                       struct bna_stats *stats);

#endif /* __BNA_H__ */