To harden against malicious or accidental modification, all tables of function pointers should be const.
Compile tested only.
Gleaned from PaX.
Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
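The pattern is mechanical: replace runtime assignment of each function pointer with a designated initializer, mark the table static const so it is placed in read-only memory, and constify every pointer that refers to it. A minimal sketch of the idea with hypothetical names (struct ops, dev_start, dev_stop are illustrative only, not from this driver):

	struct ops {
		void (*start)(void);
		void (*stop)(void);
	};

	static void dev_start(void) { }
	static void dev_stop(void) { }

	/*
	 * Before: a writable table filled in at runtime; a stray or
	 * malicious write can redirect the pointers.
	 *
	 *	static struct ops dev_ops;
	 *	dev_ops.start = dev_start;
	 *	dev_ops.stop  = dev_stop;
	 *
	 * After: initialized at compile time and placed in .rodata.
	 */
	static const struct ops dev_ops = {
		.start = dev_start,
		.stop  = dev_stop,
	};

The same treatment turns the tx_cbfn/rx_cbfn locals below into static const objects, which also avoids re-initializing them on the stack on every call.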
struct bfi_ioc_attr *attr;
struct bfa_ioc_cbfn *cbfn;
struct bfa_ioc_mbox_mod mbox_mod;
- struct bfa_ioc_hwif *ioc_hwif;
+ const struct bfa_ioc_hwif *ioc_hwif;
struct bfa_iocpf iocpf;
enum bfi_asic_gen asic_gen;
enum bfi_asic_mode asic_mode;
static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
enum bfi_asic_mode asic_mode);
-static struct bfa_ioc_hwif nw_hwif_ct;
-
-static void
-bfa_ioc_set_ctx_hwif(struct bfa_ioc *ioc, struct bfa_ioc_hwif *hwif)
-{
- hwif->ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
- hwif->ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
- hwif->ioc_notify_fail = bfa_ioc_ct_notify_fail;
- hwif->ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
- hwif->ioc_sync_start = bfa_ioc_ct_sync_start;
- hwif->ioc_sync_join = bfa_ioc_ct_sync_join;
- hwif->ioc_sync_leave = bfa_ioc_ct_sync_leave;
- hwif->ioc_sync_ack = bfa_ioc_ct_sync_ack;
- hwif->ioc_sync_complete = bfa_ioc_ct_sync_complete;
-}
+static const struct bfa_ioc_hwif nw_hwif_ct = {
+ .ioc_pll_init = bfa_ioc_ct_pll_init,
+ .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
+ .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
+ .ioc_reg_init = bfa_ioc_ct_reg_init,
+ .ioc_map_port = bfa_ioc_ct_map_port,
+ .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
+ .ioc_notify_fail = bfa_ioc_ct_notify_fail,
+ .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
+ .ioc_sync_start = bfa_ioc_ct_sync_start,
+ .ioc_sync_join = bfa_ioc_ct_sync_join,
+ .ioc_sync_leave = bfa_ioc_ct_sync_leave,
+ .ioc_sync_ack = bfa_ioc_ct_sync_ack,
+ .ioc_sync_complete = bfa_ioc_ct_sync_complete,
+};
/**
 * Called from bfa_ioc_attach() to map asic specific calls.
 */
void
bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
{
- bfa_ioc_set_ctx_hwif(ioc, &nw_hwif_ct);
-
- nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
- nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
- nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
- nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
ioc->ioc_hwif = &nw_hwif_ct;
}
struct bna_res_info *res_info);
struct bna_tx *bna_tx_create(struct bna *bna, struct bnad *bnad,
struct bna_tx_config *tx_cfg,
- struct bna_tx_event_cbfn *tx_cbfn,
+ const struct bna_tx_event_cbfn *tx_cbfn,
struct bna_res_info *res_info, void *priv);
void bna_tx_destroy(struct bna_tx *tx);
void bna_tx_enable(struct bna_tx *tx);
struct bna_res_info *res_info);
struct bna_rx *bna_rx_create(struct bna *bna, struct bnad *bnad,
struct bna_rx_config *rx_cfg,
- struct bna_rx_event_cbfn *rx_cbfn,
+ const struct bna_rx_event_cbfn *rx_cbfn,
struct bna_res_info *res_info, void *priv);
void bna_rx_destroy(struct bna_rx *rx);
void bna_rx_enable(struct bna_rx *rx);
struct bna_rx *
bna_rx_create(struct bna *bna, struct bnad *bnad,
struct bna_rx_config *rx_cfg,
- struct bna_rx_event_cbfn *rx_cbfn,
+ const struct bna_rx_event_cbfn *rx_cbfn,
struct bna_res_info *res_info,
void *priv)
{
struct bna_tx *
bna_tx_create(struct bna *bna, struct bnad *bnad,
struct bna_tx_config *tx_cfg,
- struct bna_tx_event_cbfn *tx_cbfn,
+ const struct bna_tx_event_cbfn *tx_cbfn,
struct bna_res_info *res_info, void *priv)
{
struct bna_intr_info *intr_info;
struct bna_intr_info *intr_info =
&res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
- struct bna_tx_event_cbfn tx_cbfn;
+ static const struct bna_tx_event_cbfn tx_cbfn = {
+ .tcb_setup_cbfn = bnad_cb_tcb_setup,
+ .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
+ .tx_stall_cbfn = bnad_cb_tx_stall,
+ .tx_resume_cbfn = bnad_cb_tx_resume,
+ .tx_cleanup_cbfn = bnad_cb_tx_cleanup,
+ };
+
struct bna_tx *tx;
unsigned long flags;
tx_config->tx_type = BNA_TX_T_REGULAR;
tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
- /* Initialize the tx event handlers */
- tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
- tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
- tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
- tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
- tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
-
/* Get BNA's resource requirement for one tx object */
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_tx_res_req(bnad->num_txq_per_tx,
struct bna_intr_info *intr_info =
&res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
- struct bna_rx_event_cbfn rx_cbfn;
+ static const struct bna_rx_event_cbfn rx_cbfn = {
+ .rcb_setup_cbfn = bnad_cb_rcb_setup,
+ .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
+ .ccb_setup_cbfn = bnad_cb_ccb_setup,
+ .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
+ .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
+ .rx_post_cbfn = bnad_cb_rx_post,
+ };
struct bna_rx *rx;
unsigned long flags;
/* Initialize the Rx object configuration */
bnad_init_rx_config(bnad, rx_config);
- /* Initialize the Rx event handlers */
- rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
- rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
- rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
- rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
- rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
- rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
-
/* Get BNA's resource requirement for one Rx object */
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_rx_res_req(rx_config, res_info);