From: David S. Miller Date: Mon, 24 Oct 2011 22:18:09 +0000 (-0400) Subject: Merge branch 'master' of ra.kernel.org:/pub/scm/linux/kernel/git/davem/net X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=1805b2f04855f07afe3a71d620a68f483b0ed74f;p=GitHub%2Fmt8127%2Fandroid_kernel_alcatel_ttab.git Merge branch 'master' of ra.kernel.org:/pub/scm/linux/kernel/git/davem/net --- 1805b2f04855f07afe3a71d620a68f483b0ed74f diff --cc MAINTAINERS index 5008b087cb74,b3bc88d9c03a..bb4a83af08f2 --- a/MAINTAINERS +++ b/MAINTAINERS @@@ -2468,10 -2460,10 +2468,10 @@@ S: Supporte F: drivers/infiniband/hw/ehca/ EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER - M: Breno Leitao + M: Thadeu Lima de Souza Cascardo L: netdev@vger.kernel.org S: Maintained -F: drivers/net/ehea/ +F: drivers/net/ethernet/ibm/ehea/ EMBEDDED LINUX M: Paul Gortmaker diff --cc drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 2f92487724c6,000000000000..627a5807836d mode 100644,000000..100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@@ -1,2069 -1,0 +1,2075 @@@ +/* bnx2x.h: Broadcom Everest network driver. + * + * Copyright (c) 2007-2011 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Maintained by: Eilon Greenstein + * Written by: Eliezer Tamir + * Based on code from Michael Chan's bnx2 driver + */ + +#ifndef BNX2X_H +#define BNX2X_H +#include <linux/netdevice.h> +#include <linux/dma-mapping.h> +#include <linux/types.h> + +/* compilation time flags */ + +/* define this to make the driver freeze on error to allow getting debug info + * (you will need to reboot afterwards) */ +/* #define BNX2X_STOP_ON_ERROR */ + +#define DRV_MODULE_VERSION "1.70.00-0" +#define DRV_MODULE_RELDATE "2011/06/13" +#define BNX2X_BC_VER 0x040200 + +#if defined(CONFIG_DCB) +#define BCM_DCBNL +#endif +#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) +#define BCM_CNIC 1 +#include "../cnic_if.h" +#endif + +#ifdef BCM_CNIC +#define BNX2X_MIN_MSIX_VEC_CNT 3 +#define BNX2X_MSIX_VEC_FP_START 2 +#else +#define BNX2X_MIN_MSIX_VEC_CNT 2 +#define BNX2X_MSIX_VEC_FP_START 1 +#endif + +#include <linux/mdio.h> + +#include "bnx2x_reg.h" +#include "bnx2x_fw_defs.h" +#include "bnx2x_hsi.h" +#include "bnx2x_link.h" +#include "bnx2x_sp.h" +#include "bnx2x_dcb.h" +#include "bnx2x_stats.h" + +/* error/debug prints */ + +#define DRV_MODULE_NAME "bnx2x" + +/* for messages that are currently off */ +#define BNX2X_MSG_OFF 0 +#define BNX2X_MSG_MCP 0x010000 /* was: NETIF_MSG_HW */ +#define BNX2X_MSG_STATS 0x020000 /* was: NETIF_MSG_TIMER */ +#define BNX2X_MSG_NVM 0x040000 /* was: NETIF_MSG_HW */ +#define BNX2X_MSG_DMAE 0x080000 /* was: NETIF_MSG_HW */ +#define BNX2X_MSG_SP 0x100000 /* was: NETIF_MSG_INTR */ +#define BNX2X_MSG_FP 0x200000 /* was: NETIF_MSG_INTR */ + +/* regular debug print */ +#define DP(__mask, fmt, ...) \ +do { \ + if (bp->msg_enable & (__mask)) \ + pr_notice("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + bp->dev ? (bp->dev->name) : "?", \ + ##__VA_ARGS__); \ +} while (0) + +#define DP_CONT(__mask, fmt, ...) \ +do { \ + if (bp->msg_enable & (__mask)) \ + pr_cont(fmt, ##__VA_ARGS__); \ +} while (0) + +/* errors debug print */ +#define BNX2X_DBG_ERR(fmt, ...) \ +do { \ + if (netif_msg_probe(bp)) \ + pr_err("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + bp->dev ? (bp->dev->name) : "?", \ + ##__VA_ARGS__); \ +} while (0) + +/* for errors (never masked) */ +#define BNX2X_ERR(fmt, ...)
\ +do { \ + pr_err("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + bp->dev ? (bp->dev->name) : "?", \ + ##__VA_ARGS__); \ +} while (0) + +#define BNX2X_ERROR(fmt, ...) \ + pr_err("[%s:%d]" fmt, __func__, __LINE__, ##__VA_ARGS__) + + +/* before we have a dev->name use dev_info() */ +#define BNX2X_DEV_INFO(fmt, ...) \ +do { \ + if (netif_msg_probe(bp)) \ + dev_info(&bp->pdev->dev, fmt, ##__VA_ARGS__); \ +} while (0) + +#ifdef BNX2X_STOP_ON_ERROR +void bnx2x_int_disable(struct bnx2x *bp); +#define bnx2x_panic() \ +do { \ + bp->panic = 1; \ + BNX2X_ERR("driver assert\n"); \ + bnx2x_int_disable(bp); \ + bnx2x_panic_dump(bp); \ +} while (0) +#else +#define bnx2x_panic() \ +do { \ + bp->panic = 1; \ + BNX2X_ERR("driver assert\n"); \ + bnx2x_panic_dump(bp); \ +} while (0) +#endif + +#define bnx2x_mc_addr(ha) ((ha)->addr) +#define bnx2x_uc_addr(ha) ((ha)->addr) + +#define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff) +#define U64_HI(x) (u32)(((u64)(x)) >> 32) +#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo)) + + +#define REG_ADDR(bp, offset) ((bp->regview) + (offset)) + +#define REG_RD(bp, offset) readl(REG_ADDR(bp, offset)) +#define REG_RD8(bp, offset) readb(REG_ADDR(bp, offset)) +#define REG_RD16(bp, offset) readw(REG_ADDR(bp, offset)) + +#define REG_WR(bp, offset, val) writel((u32)val, REG_ADDR(bp, offset)) +#define REG_WR8(bp, offset, val) writeb((u8)val, REG_ADDR(bp, offset)) +#define REG_WR16(bp, offset, val) writew((u16)val, REG_ADDR(bp, offset)) + +#define REG_RD_IND(bp, offset) bnx2x_reg_rd_ind(bp, offset) +#define REG_WR_IND(bp, offset, val) bnx2x_reg_wr_ind(bp, offset, val) + +#define REG_RD_DMAE(bp, offset, valp, len32) \ + do { \ + bnx2x_read_dmae(bp, offset, len32);\ + memcpy(valp, bnx2x_sp(bp, wb_data[0]), (len32) * 4); \ + } while (0) + +#define REG_WR_DMAE(bp, offset, valp, len32) \ + do { \ + memcpy(bnx2x_sp(bp, wb_data[0]), valp, (len32) * 4); \ + bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), \ + offset, len32); \ + } while (0) + +#define REG_WR_DMAE_LEN(bp, offset, valp, len32) \ + REG_WR_DMAE(bp, offset, valp, len32) + +#define VIRT_WR_DMAE_LEN(bp, data, addr, len32, le32_swap) \ + do { \ + memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \ + bnx2x_write_big_buf_wb(bp, addr, len32); \ + } while (0) + +#define SHMEM_ADDR(bp, field) (bp->common.shmem_base + \ + offsetof(struct shmem_region, field)) +#define SHMEM_RD(bp, field) REG_RD(bp, SHMEM_ADDR(bp, field)) +#define SHMEM_WR(bp, field, val) REG_WR(bp, SHMEM_ADDR(bp, field), val) + +#define SHMEM2_ADDR(bp, field) (bp->common.shmem2_base + \ + offsetof(struct shmem2_region, field)) +#define SHMEM2_RD(bp, field) REG_RD(bp, SHMEM2_ADDR(bp, field)) +#define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val) +#define MF_CFG_ADDR(bp, field) (bp->common.mf_cfg_base + \ + offsetof(struct mf_cfg, field)) +#define MF2_CFG_ADDR(bp, field) (bp->common.mf2_cfg_base + \ + offsetof(struct mf2_cfg, field)) + +#define MF_CFG_RD(bp, field) REG_RD(bp, MF_CFG_ADDR(bp, field)) +#define MF_CFG_WR(bp, field, val) REG_WR(bp,\ + MF_CFG_ADDR(bp, field), (val)) +#define MF2_CFG_RD(bp, field) REG_RD(bp, MF2_CFG_ADDR(bp, field)) + +#define SHMEM2_HAS(bp, field) ((bp)->common.shmem2_base && \ + (SHMEM2_RD((bp), size) > \ + offsetof(struct shmem2_region, field))) + +#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg) +#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val) + +/* SP SB indices */ + +/* General SP events - stats query, cfc delete, etc */ +#define HC_SP_INDEX_ETH_DEF_CONS 3 + +/* EQ completions */ +#define 
HC_SP_INDEX_EQ_CONS 7 + +/* FCoE L2 connection completions */ +#define HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS 6 +#define HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS 4 +/* iSCSI L2 */ +#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5 +#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1 + +/* Special clients parameters */ + +/* SB indices */ +/* FCoE L2 */ +#define BNX2X_FCOE_L2_RX_INDEX \ + (&bp->def_status_blk->sp_sb.\ + index_values[HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS]) + +#define BNX2X_FCOE_L2_TX_INDEX \ + (&bp->def_status_blk->sp_sb.\ + index_values[HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS]) + +/** + * CIDs and CLIDs: + * The CLIDs below are the CLIDs for func 0; the CLID for any other + * function is calculated by the formula: + * + * FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X + * + */ - /* iSCSI L2 */ - #define BNX2X_ISCSI_ETH_CL_ID_IDX 1 - #define BNX2X_ISCSI_ETH_CID 49 - - /* FCoE L2 */ - #define BNX2X_FCOE_ETH_CL_ID_IDX 2 - #define BNX2X_FCOE_ETH_CID 50 ++enum { ++ BNX2X_ISCSI_ETH_CL_ID_IDX, ++ BNX2X_FCOE_ETH_CL_ID_IDX, ++ BNX2X_MAX_CNIC_ETH_CL_ID_IDX, ++}; ++ ++#define BNX2X_CNIC_START_ETH_CID 48 ++enum { ++ /* iSCSI L2 */ ++ BNX2X_ISCSI_ETH_CID = BNX2X_CNIC_START_ETH_CID, ++ /* FCoE L2 */ ++ BNX2X_FCOE_ETH_CID, ++}; + +/** Additional rings budgeting */ +#ifdef BCM_CNIC +#define CNIC_PRESENT 1 +#define FCOE_PRESENT 1 +#else +#define CNIC_PRESENT 0 +#define FCOE_PRESENT 0 +#endif /* BCM_CNIC */ +#define NON_ETH_CONTEXT_USE (FCOE_PRESENT) + +#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \ + AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR + +#define SM_RX_ID 0 +#define SM_TX_ID 1 + +/* defines for multiple tx priority indices */ +#define FIRST_TX_ONLY_COS_INDEX 1 +#define FIRST_TX_COS_INDEX 0 + +/* defines for decoding the fastpath index and the cos index out of the + * transmission queue index + */ +#define MAX_TXQS_PER_COS FP_SB_MAX_E1x + +#define TXQ_TO_FP(txq_index) ((txq_index) % MAX_TXQS_PER_COS) +#define TXQ_TO_COS(txq_index) ((txq_index) / MAX_TXQS_PER_COS) + +/* rules for calculating the cids of tx-only connections */ +#define CID_TO_FP(cid) ((cid) % MAX_TXQS_PER_COS) +#define CID_COS_TO_TX_ONLY_CID(cid, cos) (cid + cos * MAX_TXQS_PER_COS) + +/* fp index inside class of service range */ +#define FP_COS_TO_TXQ(fp, cos) ((fp)->index + cos * MAX_TXQS_PER_COS) + +/* + * 0..15 eth cos0 + * 16..31 eth cos1 if applicable + * 32..47 eth cos2 if applicable + * fcoe queue follows eth queues (16, 32, 48 depending on cos) + */ +#define MAX_ETH_TXQ_IDX(bp) (MAX_TXQS_PER_COS * (bp)->max_cos) +#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp)) + +/* fast path */ +struct sw_rx_bd { + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(mapping); +}; + +struct sw_tx_bd { + struct sk_buff *skb; + u16 first_bd; + u8 flags; +/* Set on the first BD descriptor when there is a split BD */ +#define BNX2X_TSO_SPLIT_BD (1<<0) +}; + +struct sw_rx_page { + struct page *page; + DEFINE_DMA_UNMAP_ADDR(mapping); +}; + +union db_prod { + struct doorbell_set_prod data; + u32 raw; +}; + +/* dropless fc FW/HW related params */ +#define BRB_SIZE(bp) (CHIP_IS_E3(bp) ? 1024 : 512) +#define MAX_AGG_QS(bp) (CHIP_IS_E1(bp) ?
\ + ETH_MAX_AGGREGATION_QUEUES_E1 :\ + ETH_MAX_AGGREGATION_QUEUES_E1H_E2) +#define FW_DROP_LEVEL(bp) (3 + MAX_SPQ_PENDING + MAX_AGG_QS(bp)) +#define FW_PREFETCH_CNT 16 +#define DROPLESS_FC_HEADROOM 100 + +/* MC hsi */ +#define BCM_PAGE_SHIFT 12 +#define BCM_PAGE_SIZE (1 << BCM_PAGE_SHIFT) +#define BCM_PAGE_MASK (~(BCM_PAGE_SIZE - 1)) +#define BCM_PAGE_ALIGN(addr) (((addr) + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK) + +#define PAGES_PER_SGE_SHIFT 0 +#define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT) +#define SGE_PAGE_SIZE PAGE_SIZE +#define SGE_PAGE_SHIFT PAGE_SHIFT +#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr)) + +/* SGE ring related macros */ +#define NUM_RX_SGE_PAGES 2 +#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) +#define NEXT_PAGE_SGE_DESC_CNT 2 +#define MAX_RX_SGE_CNT (RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT) +/* RX_SGE_CNT is promised to be a power of 2 */ +#define RX_SGE_MASK (RX_SGE_CNT - 1) +#define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES) +#define MAX_RX_SGE (NUM_RX_SGE - 1) +#define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \ + (MAX_RX_SGE_CNT - 1)) ? \ + (x) + 1 + NEXT_PAGE_SGE_DESC_CNT : \ + (x) + 1) +#define RX_SGE(x) ((x) & MAX_RX_SGE) + +/* + * Number of required SGEs is the sum of two: + * 1. Number of possible opened aggregations (next packet for + * these aggregations will probably consume SGE immediately) + * 2. Rest of BRB blocks divided by 2 (block will consume new SGE only + * after placement on BD for new TPA aggregation) + * + * Takes into account NEXT_PAGE_SGE_DESC_CNT "next" elements on each page + */ +#define NUM_SGE_REQ (MAX_AGG_QS(bp) + \ + (BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2) +#define NUM_SGE_PG_REQ ((NUM_SGE_REQ + MAX_RX_SGE_CNT - 1) / \ + MAX_RX_SGE_CNT) +#define SGE_TH_LO(bp) (NUM_SGE_REQ + \ + NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT) +#define SGE_TH_HI(bp) (SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM) + +/* Manipulate a bit vector defined as an array of u64 */ + +/* Number of bits in one sge_mask array element */ +#define BIT_VEC64_ELEM_SZ 64 +#define BIT_VEC64_ELEM_SHIFT 6 +#define BIT_VEC64_ELEM_MASK ((u64)BIT_VEC64_ELEM_SZ - 1) + + +#define __BIT_VEC64_SET_BIT(el, bit) \ + do { \ + el = ((el) | ((u64)0x1 << (bit))); \ + } while (0) + +#define __BIT_VEC64_CLEAR_BIT(el, bit) \ + do { \ + el = ((el) & (~((u64)0x1 << (bit)))); \ + } while (0) + + +#define BIT_VEC64_SET_BIT(vec64, idx) \ + __BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \ + (idx) & BIT_VEC64_ELEM_MASK) + +#define BIT_VEC64_CLEAR_BIT(vec64, idx) \ + __BIT_VEC64_CLEAR_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \ + (idx) & BIT_VEC64_ELEM_MASK) + +#define BIT_VEC64_TEST_BIT(vec64, idx) \ + (((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \ + ((idx) & BIT_VEC64_ELEM_MASK)) & 0x1) + +/* Creates a bitmask of all ones in less significant bits.
+ idx - index of the most significant bit in the created mask */ +#define BIT_VEC64_ONES_MASK(idx) \ + (((u64)0x1 << (((idx) & BIT_VEC64_ELEM_MASK) + 1)) - 1) +#define BIT_VEC64_ELEM_ONE_MASK ((u64)(~0)) + +/*******************************************************/ + + + +/* Number of u64 elements in SGE mask array */ +#define RX_SGE_MASK_LEN ((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \ + BIT_VEC64_ELEM_SZ) +#define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1) +#define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK) + +union host_hc_status_block { + /* pointer to fp status block e1x */ + struct host_hc_status_block_e1x *e1x_sb; + /* pointer to fp status block e2 */ + struct host_hc_status_block_e2 *e2_sb; +}; + +struct bnx2x_agg_info { + /* + * The first aggregation buffer is an skb; the following ones are pages. + * We will preallocate the skbs for each aggregation when + * we open the interface and will replace the BD at the consumer + * with this one when we receive the TPA_START CQE in order to + * keep the Rx BD ring consistent. + */ + struct sw_rx_bd first_buf; + u8 tpa_state; +#define BNX2X_TPA_START 1 +#define BNX2X_TPA_STOP 2 +#define BNX2X_TPA_ERROR 3 + u8 placement_offset; + u16 parsing_flags; + u16 vlan_tag; + u16 len_on_bd; +}; + +#define Q_STATS_OFFSET32(stat_name) \ + (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4) + +struct bnx2x_fp_txdata { + + struct sw_tx_bd *tx_buf_ring; + + union eth_tx_bd_types *tx_desc_ring; + dma_addr_t tx_desc_mapping; + + u32 cid; + + union db_prod tx_db; + + u16 tx_pkt_prod; + u16 tx_pkt_cons; + u16 tx_bd_prod; + u16 tx_bd_cons; + + unsigned long tx_pkt; + + __le16 *tx_cons_sb; + + int txq_index; +}; + +struct bnx2x_fastpath { + struct bnx2x *bp; /* parent */ + +#define BNX2X_NAPI_WEIGHT 128 + struct napi_struct napi; + union host_hc_status_block status_blk; + /* chip-independent shortcuts into sb structure */ + __le16 *sb_index_values; + __le16 *sb_running_index; + /* chip-independent shortcut into rx_prods_offset memory */ + u32 ustorm_rx_prods_offset; + + u32 rx_buf_size; + + dma_addr_t status_blk_mapping; + + u8 max_cos; /* actual number of active tx coses */ + struct bnx2x_fp_txdata txdata[BNX2X_MULTI_TX_COS]; + + struct sw_rx_bd *rx_buf_ring; /* BDs mappings ring */ + struct sw_rx_page *rx_page_ring; /* SGE pages mappings ring */ + + struct eth_rx_bd *rx_desc_ring; + dma_addr_t rx_desc_mapping; + + union eth_rx_cqe *rx_comp_ring; + dma_addr_t rx_comp_mapping; + + /* SGE ring */ + struct eth_rx_sge *rx_sge_ring; + dma_addr_t rx_sge_mapping; + + u64 sge_mask[RX_SGE_MASK_LEN]; + + u32 cid; + + __le16 fp_hc_idx; + + u8 index; /* number in fp array */ + u8 cl_id; /* eth client id */ + u8 cl_qzone_id; + u8 fw_sb_id; /* status block number in FW */ + u8 igu_sb_id; /* status block number in HW */ + + u16 rx_bd_prod; + u16 rx_bd_cons; + u16 rx_comp_prod; + u16 rx_comp_cons; + u16 rx_sge_prod; + /* The last maximal completed SGE */ + u16 last_max_sge; + __le16 *rx_cons_sb; + unsigned long rx_pkt, + rx_calls; + + /* TPA related */ + struct bnx2x_agg_info tpa_info[ETH_MAX_AGGREGATION_QUEUES_E1H_E2]; + u8 disable_tpa; +#ifdef BNX2X_STOP_ON_ERROR + u64 tpa_queue_used; +#endif + + struct tstorm_per_queue_stats old_tclient; + struct ustorm_per_queue_stats old_uclient; + struct xstorm_per_queue_stats old_xclient; + struct bnx2x_eth_q_stats eth_q_stats; + + /* The size is calculated using the following: + sizeof name field from netdev structure + + 4 ('-Xx-' string) + + 4 (for the digits and to make it DWORD aligned) */ +#define FP_NAME_SIZE (sizeof(((struct
net_device *)0)->name) + 8) + char name[FP_NAME_SIZE]; + + /* MACs object */ + struct bnx2x_vlan_mac_obj mac_obj; + + /* Queue State object */ + struct bnx2x_queue_sp_obj q_obj; + +}; + +#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) + +/* Use 2500 as a mini-jumbo MTU for FCoE */ +#define BNX2X_FCOE_MINI_JUMBO_MTU 2500 + +/* FCoE L2 `fastpath' entry is right after the eth entries */ +#define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp) +#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX]) +#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var) +#define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \ + txdata[FIRST_TX_COS_INDEX].var) + + +#define IS_ETH_FP(fp) (fp->index < \ + BNX2X_NUM_ETH_QUEUES(fp->bp)) +#ifdef BCM_CNIC +#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX) +#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX) +#else +#define IS_FCOE_FP(fp) false +#define IS_FCOE_IDX(idx) false +#endif + + +/* MC hsi */ +#define MAX_FETCH_BD 13 /* HW max BDs per packet */ +#define RX_COPY_THRESH 92 + +#define NUM_TX_RINGS 16 +#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types)) +#define NEXT_PAGE_TX_DESC_CNT 1 +#define MAX_TX_DESC_CNT (TX_DESC_CNT - NEXT_PAGE_TX_DESC_CNT) +#define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS) +#define MAX_TX_BD (NUM_TX_BD - 1) +#define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2) +#define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \ + (MAX_TX_DESC_CNT - 1)) ? \ + (x) + 1 + NEXT_PAGE_TX_DESC_CNT : \ + (x) + 1) +#define TX_BD(x) ((x) & MAX_TX_BD) +#define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT) + +/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */ +#define NUM_RX_RINGS 8 +#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) +#define NEXT_PAGE_RX_DESC_CNT 2 +#define MAX_RX_DESC_CNT (RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT) +#define RX_DESC_MASK (RX_DESC_CNT - 1) +#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS) +#define MAX_RX_BD (NUM_RX_BD - 1) +#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2) + +/* dropless fc calculations for BDs + * + * The number of BDs should equal the number of buffers in the BRB: + * Low threshold takes into account NEXT_PAGE_RX_DESC_CNT + * "next" elements on each page + */ +#define NUM_BD_REQ BRB_SIZE(bp) +#define NUM_BD_PG_REQ ((NUM_BD_REQ + MAX_RX_DESC_CNT - 1) / \ + MAX_RX_DESC_CNT) +#define BD_TH_LO(bp) (NUM_BD_REQ + \ + NUM_BD_PG_REQ * NEXT_PAGE_RX_DESC_CNT + \ + FW_DROP_LEVEL(bp)) +#define BD_TH_HI(bp) (BD_TH_LO(bp) + DROPLESS_FC_HEADROOM) + +#define MIN_RX_AVAIL ((bp)->dropless_fc ? BD_TH_HI(bp) + 128 : 128) + +#define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? \ + ETH_MIN_RX_CQES_WITH_TPA_E1 : \ + ETH_MIN_RX_CQES_WITH_TPA_E1H_E2) +#define MIN_RX_SIZE_NONTPA_HW ETH_MIN_RX_CQES_WITHOUT_TPA +#define MIN_RX_SIZE_TPA (max_t(u32, MIN_RX_SIZE_TPA_HW, MIN_RX_AVAIL)) +#define MIN_RX_SIZE_NONTPA (max_t(u32, MIN_RX_SIZE_NONTPA_HW,\ + MIN_RX_AVAIL)) + +#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \ + (MAX_RX_DESC_CNT - 1)) ?
\ + (x) + 1 + NEXT_PAGE_RX_DESC_CNT : \ + (x) + 1) +#define RX_BD(x) ((x) & MAX_RX_BD) + +/* + * As long as CQE is X times bigger than BD entry we have to allocate X times + * more pages for CQ ring in order to keep it balanced with BD ring + */ +#define CQE_BD_REL (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd)) +#define NUM_RCQ_RINGS (NUM_RX_RINGS * CQE_BD_REL) +#define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe)) +#define NEXT_PAGE_RCQ_DESC_CNT 1 +#define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - NEXT_PAGE_RCQ_DESC_CNT) +#define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS) +#define MAX_RCQ_BD (NUM_RCQ_BD - 1) +#define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2) +#define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \ + (MAX_RCQ_DESC_CNT - 1)) ? \ + (x) + 1 + NEXT_PAGE_RCQ_DESC_CNT : \ + (x) + 1) +#define RCQ_BD(x) ((x) & MAX_RCQ_BD) + +/* dropless fc calculations for RCQs + * + * The number of RCQs should equal the number of buffers in the BRB: + * Low threshold takes into account NEXT_PAGE_RCQ_DESC_CNT + * "next" elements on each page + */ +#define NUM_RCQ_REQ BRB_SIZE(bp) +#define NUM_RCQ_PG_REQ ((NUM_BD_REQ + MAX_RCQ_DESC_CNT - 1) / \ + MAX_RCQ_DESC_CNT) +#define RCQ_TH_LO(bp) (NUM_RCQ_REQ + \ + NUM_RCQ_PG_REQ * NEXT_PAGE_RCQ_DESC_CNT + \ + FW_DROP_LEVEL(bp)) +#define RCQ_TH_HI(bp) (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM) + + +/* This is needed for determining last_max */ +#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b)) +#define SUB_S32(a, b) (s32)((s32)(a) - (s32)(b)) + + +#define BNX2X_SWCID_SHIFT 17 +#define BNX2X_SWCID_MASK ((0x1 << BNX2X_SWCID_SHIFT) - 1) + +/* used on a CID received from the HW */ +#define SW_CID(x) (le32_to_cpu(x) & BNX2X_SWCID_MASK) +#define CQE_CMD(x) (le32_to_cpu(x) >> \ + COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT) + +#define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr_hi), \ + le32_to_cpu((bd)->addr_lo)) +#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes)) + +#define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */ +#define BNX2X_DB_SHIFT 7 /* 128 bytes*/ +#if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT) +#error "Min DB doorbell stride is 8" +#endif +#define DPM_TRIGER_TYPE 0x40 +#define DOORBELL(bp, cid, val) \ + do { \ + writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \ + DPM_TRIGER_TYPE); \ + } while (0) + + +/* TX CSUM helpers */ +#define SKB_CS_OFF(skb) (offsetof(struct tcphdr, check) - \ + skb->csum_offset) +#define SKB_CS(skb) (*(u16 *)(skb_transport_header(skb) + \ + skb->csum_offset)) + +#define pbd_tcp_flags(skb) (ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff) + +#define XMIT_PLAIN 0 +#define XMIT_CSUM_V4 0x1 +#define XMIT_CSUM_V6 0x2 +#define XMIT_CSUM_TCP 0x4 +#define XMIT_GSO_V4 0x8 +#define XMIT_GSO_V6 0x10 + +#define XMIT_CSUM (XMIT_CSUM_V4 | XMIT_CSUM_V6) +#define XMIT_GSO (XMIT_GSO_V4 | XMIT_GSO_V6) + + +/* stuff added to make the code fit 80Col */ +#define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE) +#define CQE_TYPE_START(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_START_AGG) +#define CQE_TYPE_STOP(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_STOP_AGG) +#define CQE_TYPE_SLOW(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_RAMROD) +#define CQE_TYPE_FAST(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_FASTPATH) + +#define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG + +#define BNX2X_IP_CSUM_ERR(cqe) \ + (!((cqe)->fast_path_cqe.status_flags & \ + ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \ + ((cqe)->fast_path_cqe.type_error_flags & \ + ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) + +#define
BNX2X_L4_CSUM_ERR(cqe) \ + (!((cqe)->fast_path_cqe.status_flags & \ + ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \ + ((cqe)->fast_path_cqe.type_error_flags & \ + ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) + +#define BNX2X_RX_CSUM_OK(cqe) \ + (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe))) + +#define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \ + (((le16_to_cpu(flags) & \ + PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \ + PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) \ + == PRS_FLAG_OVERETH_IPV4) +#define BNX2X_RX_SUM_FIX(cqe) \ + BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags) + + +#define FP_USB_FUNC_OFF \ + offsetof(struct cstorm_status_block_u, func) +#define FP_CSB_FUNC_OFF \ + offsetof(struct cstorm_status_block_c, func) + +#define HC_INDEX_ETH_RX_CQ_CONS 1 + +#define HC_INDEX_OOO_TX_CQ_CONS 4 + +#define HC_INDEX_ETH_TX_CQ_CONS_COS0 5 + +#define HC_INDEX_ETH_TX_CQ_CONS_COS1 6 + +#define HC_INDEX_ETH_TX_CQ_CONS_COS2 7 + +#define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0 + +#define BNX2X_RX_SB_INDEX \ + (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]) + +#define BNX2X_TX_SB_INDEX_BASE BNX2X_TX_SB_INDEX_COS0 + +#define BNX2X_TX_SB_INDEX_COS0 \ + (&fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0]) + +/* end of fast path */ + +/* common */ + +struct bnx2x_common { + + u32 chip_id; +/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ +#define CHIP_ID(bp) (bp->common.chip_id & 0xfffffff0) + +#define CHIP_NUM(bp) (bp->common.chip_id >> 16) +#define CHIP_NUM_57710 0x164e +#define CHIP_NUM_57711 0x164f +#define CHIP_NUM_57711E 0x1650 +#define CHIP_NUM_57712 0x1662 +#define CHIP_NUM_57712_MF 0x1663 +#define CHIP_NUM_57713 0x1651 +#define CHIP_NUM_57713E 0x1652 +#define CHIP_NUM_57800 0x168a +#define CHIP_NUM_57800_MF 0x16a5 +#define CHIP_NUM_57810 0x168e +#define CHIP_NUM_57810_MF 0x16ae +#define CHIP_NUM_57840 0x168d +#define CHIP_NUM_57840_MF 0x16ab +#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710) +#define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711) +#define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E) +#define CHIP_IS_57712(bp) (CHIP_NUM(bp) == CHIP_NUM_57712) +#define CHIP_IS_57712_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57712_MF) +#define CHIP_IS_57800(bp) (CHIP_NUM(bp) == CHIP_NUM_57800) +#define CHIP_IS_57800_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_MF) +#define CHIP_IS_57810(bp) (CHIP_NUM(bp) == CHIP_NUM_57810) +#define CHIP_IS_57810_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_MF) +#define CHIP_IS_57840(bp) (CHIP_NUM(bp) == CHIP_NUM_57840) +#define CHIP_IS_57840_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_MF) +#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \ + CHIP_IS_57711E(bp)) +#define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \ + CHIP_IS_57712_MF(bp)) +#define CHIP_IS_E3(bp) (CHIP_IS_57800(bp) || \ + CHIP_IS_57800_MF(bp) || \ + CHIP_IS_57810(bp) || \ + CHIP_IS_57810_MF(bp) || \ + CHIP_IS_57840(bp) || \ + CHIP_IS_57840_MF(bp)) +#define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp))) +#define USES_WARPCORE(bp) (CHIP_IS_E3(bp)) +#define IS_E1H_OFFSET (!CHIP_IS_E1(bp)) + +#define CHIP_REV_SHIFT 12 +#define CHIP_REV_MASK (0xF << CHIP_REV_SHIFT) +#define CHIP_REV_VAL(bp) (bp->common.chip_id & CHIP_REV_MASK) +#define CHIP_REV_Ax (0x0 << CHIP_REV_SHIFT) +#define CHIP_REV_Bx (0x1 << CHIP_REV_SHIFT) +/* assume maximum 5 revisions */ +#define CHIP_REV_IS_SLOW(bp) (CHIP_REV_VAL(bp) > 0x00005000) +/* Emul versions are A=>0xe, B=>0xc, C=>0xa, D=>8, E=>6 */ +#define CHIP_REV_IS_EMUL(bp) ((CHIP_REV_IS_SLOW(bp)) && \ + !(CHIP_REV_VAL(bp) & 0x00001000)) +/* 
FPGA versions are A=>0xf, B=>0xd, C=>0xb, D=>9, E=>7 */ +#define CHIP_REV_IS_FPGA(bp) ((CHIP_REV_IS_SLOW(bp)) && \ + (CHIP_REV_VAL(bp) & 0x00001000)) + +#define CHIP_TIME(bp) ((CHIP_REV_IS_EMUL(bp)) ? 2000 : \ + ((CHIP_REV_IS_FPGA(bp)) ? 200 : 1)) + +#define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0) +#define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f) +#define CHIP_REV_SIM(bp) (((CHIP_REV_MASK - CHIP_REV_VAL(bp)) >>\ + (CHIP_REV_SHIFT + 1)) \ + << CHIP_REV_SHIFT) +#define CHIP_REV(bp) (CHIP_REV_IS_SLOW(bp) ? \ + CHIP_REV_SIM(bp) :\ + CHIP_REV_VAL(bp)) +#define CHIP_IS_E3B0(bp) (CHIP_IS_E3(bp) && \ + (CHIP_REV(bp) == CHIP_REV_Bx)) +#define CHIP_IS_E3A0(bp) (CHIP_IS_E3(bp) && \ + (CHIP_REV(bp) == CHIP_REV_Ax)) + + int flash_size; +#define BNX2X_NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */ +#define BNX2X_NVRAM_TIMEOUT_COUNT 30000 +#define BNX2X_NVRAM_PAGE_SIZE 256 + + u32 shmem_base; + u32 shmem2_base; + u32 mf_cfg_base; + u32 mf2_cfg_base; + + u32 hw_config; + + u32 bc_ver; + + u8 int_block; +#define INT_BLOCK_HC 0 +#define INT_BLOCK_IGU 1 +#define INT_BLOCK_MODE_NORMAL 0 +#define INT_BLOCK_MODE_BW_COMP 2 +#define CHIP_INT_MODE_IS_NBC(bp) \ + (!CHIP_IS_E1x(bp) && \ + !((bp)->common.int_block & INT_BLOCK_MODE_BW_COMP)) +#define CHIP_INT_MODE_IS_BC(bp) (!CHIP_INT_MODE_IS_NBC(bp)) + + u8 chip_port_mode; +#define CHIP_4_PORT_MODE 0x0 +#define CHIP_2_PORT_MODE 0x1 +#define CHIP_PORT_MODE_NONE 0x2 +#define CHIP_MODE(bp) (bp->common.chip_port_mode) +#define CHIP_MODE_IS_4_PORT(bp) (CHIP_MODE(bp) == CHIP_4_PORT_MODE) +}; + +/* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */ +#define BNX2X_IGU_STAS_MSG_VF_CNT 64 +#define BNX2X_IGU_STAS_MSG_PF_CNT 4 + +/* end of common */ + +/* port */ + +struct bnx2x_port { + u32 pmf; + + u32 link_config[LINK_CONFIG_SIZE]; + + u32 supported[LINK_CONFIG_SIZE]; +/* link settings - missing defines */ +#define SUPPORTED_2500baseX_Full (1 << 15) + + u32 advertising[LINK_CONFIG_SIZE]; +/* link settings - missing defines */ +#define ADVERTISED_2500baseX_Full (1 << 15) + + u32 phy_addr; + + /* used to synchronize phy accesses */ + struct mutex phy_mutex; + int need_hw_lock; + + u32 port_stx; + + struct nig_stats old_nig_stats; +}; + +/* end of port */ + +#define STATS_OFFSET32(stat_name) \ + (offsetof(struct bnx2x_eth_stats, stat_name) / 4) + +/* slow path */ + +/* slow path work-queue */ +extern struct workqueue_struct *bnx2x_wq; + +#define BNX2X_MAX_NUM_OF_VFS 64 +#define BNX2X_VF_ID_INVALID 0xFF + +/* + * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is + * controlled by the number of fast-path status blocks supported by the + * device (HW/FW). Each fast-path status block (FP-SB) aka non-default + * status block represents an independent interrupts context that can + * serve a regular L2 networking queue. However, special L2 queues such + * as the FCoE queue do not require an FP-SB, and other components like + * the CNIC may consume an FP-SB, reducing the number of possible L2 queues + * + * If the maximum number of FP-SB available is X then: + * a. If CNIC is supported it consumes 1 FP-SB thus the max number of + * regular L2 queues is Y=X-1 + * b. in MF mode the actual number of L2 queues is Y= ((X-1)/MF_factor) + * c. If the FCoE L2 queue is supported the actual number of L2 queues + * is Y+1 + * d. The number of irqs (MSIX vectors) is either Y+1 (one extra for + * slow-path interrupts) or Y+2 if CNIC is supported (one additional + * FP interrupt context for the CNIC). + * e.
The number of HW contexts (CID count) is always X or X+1 if the FCoE + * L2 queue is supported. The CID for the FCoE L2 queue is always X. + */ + +/* fast-path interrupt contexts E1x */ +#define FP_SB_MAX_E1x 16 +/* fast-path interrupt contexts E2 */ +#define FP_SB_MAX_E2 HC_SB_MAX_SB_E2 + +union cdu_context { + struct eth_context eth; + char pad[1024]; +}; + +/* CDU host DB constants */ +#define CDU_ILT_PAGE_SZ_HW 3 +#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 64K */ +#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context)) + +#ifdef BCM_CNIC +#define CNIC_ISCSI_CID_MAX 256 +#define CNIC_FCOE_CID_MAX 2048 +#define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX) +#define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS) +#endif + +#define QM_ILT_PAGE_SZ_HW 0 +#define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 4K */ +#define QM_CID_ROUND 1024 + +#ifdef BCM_CNIC +/* TM (timers) host DB constants */ +#define TM_ILT_PAGE_SZ_HW 0 +#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */ +/* #define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */ +#define TM_CONN_NUM 1024 +#define TM_ILT_SZ (8 * TM_CONN_NUM) +#define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ) + +/* SRC (Searcher) host DB constants */ +#define SRC_ILT_PAGE_SZ_HW 0 +#define SRC_ILT_PAGE_SZ (4096 << SRC_ILT_PAGE_SZ_HW) /* 4K */ +#define SRC_HASH_BITS 10 +#define SRC_CONN_NUM (1 << SRC_HASH_BITS) /* 1024 */ +#define SRC_ILT_SZ (sizeof(struct src_ent) * SRC_CONN_NUM) +#define SRC_T2_SZ SRC_ILT_SZ +#define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ) + +#endif + +#define MAX_DMAE_C 8 + +/* DMA memory not used in fastpath */ +struct bnx2x_slowpath { + union { + struct mac_configuration_cmd e1x; + struct eth_classify_rules_ramrod_data e2; + } mac_rdata; + + + union { + struct tstorm_eth_mac_filter_config e1x; + struct eth_filter_rules_ramrod_data e2; + } rx_mode_rdata; + + union { + struct mac_configuration_cmd e1; + struct eth_multicast_rules_ramrod_data e2; + } mcast_rdata; + + struct eth_rss_update_ramrod_data rss_rdata; + + /* Queue State related ramrods are always sent under rtnl_lock */ + union { + struct client_init_ramrod_data init_data; + struct client_update_ramrod_data update_data; + } q_rdata; + + union { + struct function_start_data func_start; + /* pfc configuration for DCBX ramrod */ + struct flow_control_configuration pfc_config; + } func_rdata; + + /* used by the dmae command executor */ + struct dmae_command dmae[MAX_DMAE_C]; + + u32 stats_comp; + union mac_stats mac_stats; + struct nig_stats nig_stats; + struct host_port_stats port_stats; + struct host_func_stats func_stats; + struct host_func_stats func_stats_base; + + u32 wb_comp; + u32 wb_data[4]; +}; + +#define bnx2x_sp(bp, var) (&bp->slowpath->var) +#define bnx2x_sp_mapping(bp, var) \ + (bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var)) + + +/* attn group wiring */ +#define MAX_DYNAMIC_ATTN_GRPS 8 + +struct attn_route { + u32 sig[5]; +}; + +struct iro { + u32 base; + u16 m1; + u16 m2; + u16 m3; + u16 size; +}; + +struct hw_context { + union cdu_context *vcxt; + dma_addr_t cxt_mapping; + size_t size; +}; + +/* forward */ +struct bnx2x_ilt; + + +enum bnx2x_recovery_state { + BNX2X_RECOVERY_DONE, + BNX2X_RECOVERY_INIT, + BNX2X_RECOVERY_WAIT, + BNX2X_RECOVERY_FAILED +}; + +/* + * Event queue (EQ or event ring) MC hsi + * NUM_EQ_PAGES and EQ_DESC_CNT_PAGE must be power of 2 + */ +#define NUM_EQ_PAGES 1 +#define EQ_DESC_CNT_PAGE (BCM_PAGE_SIZE / sizeof(union event_ring_elem)) +#define
EQ_DESC_MAX_PAGE (EQ_DESC_CNT_PAGE - 1) +#define NUM_EQ_DESC (EQ_DESC_CNT_PAGE * NUM_EQ_PAGES) +#define EQ_DESC_MASK (NUM_EQ_DESC - 1) +#define MAX_EQ_AVAIL (EQ_DESC_MAX_PAGE * NUM_EQ_PAGES - 2) + +/* depends on EQ_DESC_CNT_PAGE being a power of 2 */ +#define NEXT_EQ_IDX(x) ((((x) & EQ_DESC_MAX_PAGE) == \ + (EQ_DESC_MAX_PAGE - 1)) ? (x) + 2 : (x) + 1) + +/* depends on the above and on NUM_EQ_PAGES being a power of 2 */ +#define EQ_DESC(x) ((x) & EQ_DESC_MASK) + +#define BNX2X_EQ_INDEX \ + (&bp->def_status_blk->sp_sb.\ + index_values[HC_SP_INDEX_EQ_CONS]) + +/* This is the data used to create a link report message. We keep the + * data of the last link report in order to prevent reporting the same + * link parameters twice. + */ +struct bnx2x_link_report_data { + u16 line_speed; /* Effective line speed */ + unsigned long link_report_flags;/* BNX2X_LINK_REPORT_XXX flags */ +}; + +enum { + BNX2X_LINK_REPORT_FD, /* Full DUPLEX */ + BNX2X_LINK_REPORT_LINK_DOWN, + BNX2X_LINK_REPORT_RX_FC_ON, + BNX2X_LINK_REPORT_TX_FC_ON, +}; + +enum { + BNX2X_PORT_QUERY_IDX, + BNX2X_PF_QUERY_IDX, + BNX2X_FIRST_QUEUE_QUERY_IDX, +}; + +struct bnx2x_fw_stats_req { + struct stats_query_header hdr; + struct stats_query_entry query[STATS_QUERY_CMD_COUNT]; +}; + +struct bnx2x_fw_stats_data { + struct stats_counter storm_counters; + struct per_port_stats port; + struct per_pf_stats pf; + struct per_queue_stats queue_stats[1]; +}; + +/* Public slow path states */ +enum { + BNX2X_SP_RTNL_SETUP_TC, + BNX2X_SP_RTNL_TX_TIMEOUT, +}; + + +struct bnx2x { + /* Fields used in the tx and intr/napi performance paths + * are grouped together in the beginning of the structure + */ + struct bnx2x_fastpath *fp; + void __iomem *regview; + void __iomem *doorbells; + u16 db_size; + + u8 pf_num; /* absolute PF number */ + u8 pfid; /* per-path PF number */ + int base_fw_ndsb; /**/ +#define BP_PATH(bp) (CHIP_IS_E1x(bp) ? 0 : (bp->pf_num & 1)) +#define BP_PORT(bp) (bp->pfid & 1) +#define BP_FUNC(bp) (bp->pfid) +#define BP_ABS_FUNC(bp) (bp->pf_num) +#define BP_VN(bp) ((bp)->pfid >> 1) +#define BP_MAX_VN_NUM(bp) (CHIP_MODE_IS_4_PORT(bp) ? 2 : 4) +#define BP_L_ID(bp) (BP_VN(bp) << 2) +#define BP_FW_MB_IDX_VN(bp, vn) (BP_PORT(bp) +\ + (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1)) +#define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp)) + + struct net_device *dev; + struct pci_dev *pdev; + + const struct iro *iro_arr; +#define IRO (bp->iro_arr) + + enum bnx2x_recovery_state recovery_state; + int is_leader; + struct msix_entry *msix_table; + + int tx_ring_size; + +/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */ +#define ETH_OVREHEAD (ETH_HLEN + 8 + 8) +#define ETH_MIN_PACKET_SIZE 60 +#define ETH_MAX_PACKET_SIZE 1500 +#define ETH_MAX_JUMBO_PACKET_SIZE 9600 + + /* Max supported alignment is 256 (8 shift) */ +#define BNX2X_RX_ALIGN_SHIFT ((L1_CACHE_SHIFT < 8) ?
\ + L1_CACHE_SHIFT : 8) + /* FW uses 2 cache lines of alignment for start packet and size */ +#define BNX2X_FW_RX_ALIGN (2 << BNX2X_RX_ALIGN_SHIFT) +#define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5) + + struct host_sp_status_block *def_status_blk; +#define DEF_SB_IGU_ID 16 +#define DEF_SB_ID HC_SP_SB_ID + __le16 def_idx; + __le16 def_att_idx; + u32 attn_state; + struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS]; + + /* slow path ring */ + struct eth_spe *spq; + dma_addr_t spq_mapping; + u16 spq_prod_idx; + struct eth_spe *spq_prod_bd; + struct eth_spe *spq_last_bd; + __le16 *dsb_sp_prod; + atomic_t cq_spq_left; /* ETH_XXX ramrods credit */ + /* used to synchronize spq accesses */ + spinlock_t spq_lock; + + /* event queue */ + union event_ring_elem *eq_ring; + dma_addr_t eq_mapping; + u16 eq_prod; + u16 eq_cons; + __le16 *eq_cons_sb; + atomic_t eq_spq_left; /* COMMON_XXX ramrods credit */ + + + + /* Counter for marking that there is a STAT_QUERY ramrod pending */ + u16 stats_pending; + /* Counter for completed statistics ramrods */ + u16 stats_comp; + + /* End of fields used in the performance code paths */ + + int panic; + int msg_enable; + + u32 flags; +#define PCIX_FLAG (1 << 0) +#define PCI_32BIT_FLAG (1 << 1) +#define ONE_PORT_FLAG (1 << 2) +#define NO_WOL_FLAG (1 << 3) +#define USING_DAC_FLAG (1 << 4) +#define USING_MSIX_FLAG (1 << 5) +#define USING_MSI_FLAG (1 << 6) +#define DISABLE_MSI_FLAG (1 << 7) +#define TPA_ENABLE_FLAG (1 << 8) +#define NO_MCP_FLAG (1 << 9) + +#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG) +#define MF_FUNC_DIS (1 << 11) +#define OWN_CNIC_IRQ (1 << 12) +#define NO_ISCSI_OOO_FLAG (1 << 13) +#define NO_ISCSI_FLAG (1 << 14) +#define NO_FCOE_FLAG (1 << 15) + +#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG) +#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG) +#define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG) + + int pm_cap; + int mrrs; + + struct delayed_work sp_task; + struct delayed_work sp_rtnl_task; + + struct delayed_work period_task; + struct timer_list timer; + int current_interval; + + u16 fw_seq; + u16 fw_drv_pulse_wr_seq; + u32 func_stx; + + struct link_params link_params; + struct link_vars link_vars; + u32 link_cnt; + struct bnx2x_link_report_data last_reported_link; + + struct mdio_if_info mdio; + + struct bnx2x_common common; + struct bnx2x_port port; + + struct cmng_struct_per_port cmng; + u32 vn_weight_sum; + u32 mf_config[E1HVN_MAX]; + u32 mf2_config[E2_FUNC_MAX]; + u32 path_has_ovlan; /* E3 */ + u16 mf_ov; + u8 mf_mode; +#define IS_MF(bp) (bp->mf_mode != 0) +#define IS_MF_SI(bp) (bp->mf_mode == MULTI_FUNCTION_SI) +#define IS_MF_SD(bp) (bp->mf_mode == MULTI_FUNCTION_SD) + + u8 wol; + + int rx_ring_size; + + u16 tx_quick_cons_trip_int; + u16 tx_quick_cons_trip; + u16 tx_ticks_int; + u16 tx_ticks; + + u16 rx_quick_cons_trip_int; + u16 rx_quick_cons_trip; + u16 rx_ticks_int; + u16 rx_ticks; +/* Maximal coalescing timeout in us */ +#define BNX2X_MAX_COALESCE_TOUT (0xf0*12) + + u32 lin_cnt; + + u16 state; +#define BNX2X_STATE_CLOSED 0 +#define BNX2X_STATE_OPENING_WAIT4_LOAD 0x1000 +#define BNX2X_STATE_OPENING_WAIT4_PORT 0x2000 +#define BNX2X_STATE_OPEN 0x3000 +#define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000 +#define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000 + +#define BNX2X_STATE_DIAG 0xe000 +#define BNX2X_STATE_ERROR 0xf000 + + int multi_mode; +#define BNX2X_MAX_PRIORITY 8 +#define BNX2X_MAX_ENTRIES_PER_PRI 16 +#define BNX2X_MAX_COS 3 +#define BNX2X_MAX_TX_COS 2 + int num_queues; + int disable_tpa; + + u32 rx_mode; +#define BNX2X_RX_MODE_NONE 0
+#define BNX2X_RX_MODE_NORMAL 1 +#define BNX2X_RX_MODE_ALLMULTI 2 +#define BNX2X_RX_MODE_PROMISC 3 +#define BNX2X_MAX_MULTICAST 64 + + u8 igu_dsb_id; + u8 igu_base_sb; + u8 igu_sb_cnt; + dma_addr_t def_status_blk_mapping; + + struct bnx2x_slowpath *slowpath; + dma_addr_t slowpath_mapping; + + /* Total number of FW statistics requests */ + u8 fw_stats_num; + + /* + * This is a memory buffer that will contain both statistics + * ramrod request and data. + */ + void *fw_stats; + dma_addr_t fw_stats_mapping; + + /* + * FW statistics request shortcut (points at the + * beginning of fw_stats buffer). + */ + struct bnx2x_fw_stats_req *fw_stats_req; + dma_addr_t fw_stats_req_mapping; + int fw_stats_req_sz; + + /* + * FW statistics data shortcut (points at the beginning of + * fw_stats buffer + fw_stats_req_sz). + */ + struct bnx2x_fw_stats_data *fw_stats_data; + dma_addr_t fw_stats_data_mapping; + int fw_stats_data_sz; + + struct hw_context context; + + struct bnx2x_ilt *ilt; +#define BP_ILT(bp) ((bp)->ilt) +#define ILT_MAX_LINES 256 +/* + * Maximum supported number of RSS queues: number of IGU SBs minus one that goes + * to CNIC. + */ +#define BNX2X_MAX_RSS_COUNT(bp) ((bp)->igu_sb_cnt - CNIC_PRESENT) + +/* + * Maximum CID count that might be required by the bnx2x: + * Max Tss * Max_Tx_Multi_Cos + CNIC L2 Clients (FCoE and iSCSI related) + */ +#define BNX2X_L2_CID_COUNT(bp) (MAX_TXQS_PER_COS * BNX2X_MULTI_TX_COS +\ + NON_ETH_CONTEXT_USE + CNIC_PRESENT) +#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\ + ILT_PAGE_CIDS)) +#define BNX2X_DB_SIZE(bp) (BNX2X_L2_CID_COUNT(bp) * (1 << BNX2X_DB_SHIFT)) + + int qm_cid_count; + + int dropless_fc; + +#ifdef BCM_CNIC + u32 cnic_flags; +#define BNX2X_CNIC_FLAG_MAC_SET 1 + void *t2; + dma_addr_t t2_mapping; + struct cnic_ops __rcu *cnic_ops; + void *cnic_data; + u32 cnic_tag; + struct cnic_eth_dev cnic_eth_dev; + union host_hc_status_block cnic_sb; + dma_addr_t cnic_sb_mapping; + struct eth_spe *cnic_kwq; + struct eth_spe *cnic_kwq_prod; + struct eth_spe *cnic_kwq_cons; + struct eth_spe *cnic_kwq_last; + u16 cnic_kwq_pending; + u16 cnic_spq_pending; + u8 fip_mac[ETH_ALEN]; + struct mutex cnic_mutex; + struct bnx2x_vlan_mac_obj iscsi_l2_mac_obj; + + /* Start index of the "special" (CNIC related) L2 clients */ + u8 cnic_base_cl_id; +#endif + + int dmae_ready; + /* used to synchronize dmae accesses */ + spinlock_t dmae_lock; + + /* used to protect the FW mail box */ + struct mutex fw_mb_mutex; + + /* used to synchronize stats collecting */ + int stats_state; + + /* used for synchronization of concurrent threads statistics handling */ + spinlock_t stats_lock; + + /* used by dmae command loader */ + struct dmae_command stats_dmae; + int executer_idx; + + u16 stats_counter; + struct bnx2x_eth_stats eth_stats; + + struct z_stream_s *strm; + void *gunzip_buf; + dma_addr_t gunzip_mapping; + int gunzip_outlen; +#define FW_BUF_SIZE 0x8000 +#define GUNZIP_BUF(bp) (bp->gunzip_buf) +#define GUNZIP_PHYS(bp) (bp->gunzip_mapping) +#define GUNZIP_OUTLEN(bp) (bp->gunzip_outlen) + + struct raw_op *init_ops; + /* Init blocks offsets inside init_ops */ + u16 *init_ops_offsets; + /* Data blob - has 32 bit granularity */ + u32 *init_data; + u32 init_mode_flags; +#define INIT_MODE_FLAGS(bp) (bp->init_mode_flags) + /* Zipped PRAM blobs - raw data */ + const u8 *tsem_int_table_data; + const u8 *tsem_pram_data; + const u8 *usem_int_table_data; + const u8 *usem_pram_data; + const u8 *xsem_int_table_data; + const u8 *xsem_pram_data; + const u8 *csem_int_table_data; + const u8
*csem_pram_data; +#define INIT_OPS(bp) (bp->init_ops) +#define INIT_OPS_OFFSETS(bp) (bp->init_ops_offsets) +#define INIT_DATA(bp) (bp->init_data) +#define INIT_TSEM_INT_TABLE_DATA(bp) (bp->tsem_int_table_data) +#define INIT_TSEM_PRAM_DATA(bp) (bp->tsem_pram_data) +#define INIT_USEM_INT_TABLE_DATA(bp) (bp->usem_int_table_data) +#define INIT_USEM_PRAM_DATA(bp) (bp->usem_pram_data) +#define INIT_XSEM_INT_TABLE_DATA(bp) (bp->xsem_int_table_data) +#define INIT_XSEM_PRAM_DATA(bp) (bp->xsem_pram_data) +#define INIT_CSEM_INT_TABLE_DATA(bp) (bp->csem_int_table_data) +#define INIT_CSEM_PRAM_DATA(bp) (bp->csem_pram_data) + +#define PHY_FW_VER_LEN 20 + char fw_ver[32]; + const struct firmware *firmware; + + /* DCB support on/off */ + u16 dcb_state; +#define BNX2X_DCB_STATE_OFF 0 +#define BNX2X_DCB_STATE_ON 1 + + /* DCBX engine mode */ + int dcbx_enabled; +#define BNX2X_DCBX_ENABLED_OFF 0 +#define BNX2X_DCBX_ENABLED_ON_NEG_OFF 1 +#define BNX2X_DCBX_ENABLED_ON_NEG_ON 2 +#define BNX2X_DCBX_ENABLED_INVALID (-1) + + bool dcbx_mode_uset; + + struct bnx2x_config_dcbx_params dcbx_config_params; + struct bnx2x_dcbx_port_params dcbx_port_params; + int dcb_version; + + /* CAM credit pools */ + struct bnx2x_credit_pool_obj macs_pool; + + /* RX_MODE object */ + struct bnx2x_rx_mode_obj rx_mode_obj; + + /* MCAST object */ + struct bnx2x_mcast_obj mcast_obj; + + /* RSS configuration object */ + struct bnx2x_rss_config_obj rss_conf_obj; + + /* Function State controlling object */ + struct bnx2x_func_sp_obj func_obj; + + unsigned long sp_state; + + /* operation indication for the sp_rtnl task */ + unsigned long sp_rtnl_state; + + /* DCBX negotiation results */ + struct dcbx_features dcbx_local_feat; + u32 dcbx_error; + +#ifdef BCM_DCBNL + struct dcbx_features dcbx_remote_feat; + u32 dcbx_remote_flags; +#endif + u32 pending_max; + + /* multiple tx classes of service */ + u8 max_cos; + + /* priority to cos mapping */ + u8 prio_to_cos[8]; +}; + +/* Tx queues may be fewer than or equal to Rx queues */ +extern int num_queues; +#define BNX2X_NUM_QUEUES(bp) (bp->num_queues) +#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE) +#define BNX2X_NUM_RX_QUEUES(bp) BNX2X_NUM_QUEUES(bp) + +#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1) + +#define BNX2X_MAX_QUEUES(bp) BNX2X_MAX_RSS_COUNT(bp) +/* #define is_eth_multi(bp) (BNX2X_NUM_ETH_QUEUES(bp) > 1) */ + +#define RSS_IPV4_CAP_MASK \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY + +#define RSS_IPV4_TCP_CAP_MASK \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY + +#define RSS_IPV6_CAP_MASK \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY + +#define RSS_IPV6_TCP_CAP_MASK \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY + +/* func init flags */ +#define FUNC_FLG_RSS 0x0001 +#define FUNC_FLG_STATS 0x0002 +/* removed FUNC_FLG_UNMATCHED 0x0004 */ +#define FUNC_FLG_TPA 0x0008 +#define FUNC_FLG_SPQ 0x0010 +#define FUNC_FLG_LEADING 0x0020 /* PF only */ + + +struct bnx2x_func_init_params { + /* dma */ + dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */ + dma_addr_t spq_map; /* valid iff FUNC_FLG_SPQ */ + + u16 func_flgs; + u16 func_id; /* abs fid */ + u16 pf_id; + u16 spq_prod; /* valid iff FUNC_FLG_SPQ */ +}; + +#define for_each_eth_queue(bp, var) \ + for ((var) = 0; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++) + +#define for_each_nondefault_eth_queue(bp, var) \ + for ((var) = 1; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++) + +#define for_each_queue(bp, var) \ + for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ + if
(skip_queue(bp, var)) \ + continue; \ + else + +/* Skip forwarding FP */ +#define for_each_rx_queue(bp, var) \ + for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ + if (skip_rx_queue(bp, var)) \ + continue; \ + else + +/* Skip OOO FP */ +#define for_each_tx_queue(bp, var) \ + for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ + if (skip_tx_queue(bp, var)) \ + continue; \ + else + +#define for_each_nondefault_queue(bp, var) \ + for ((var) = 1; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ + if (skip_queue(bp, var)) \ + continue; \ + else + +#define for_each_cos_in_tx_queue(fp, var) \ + for ((var) = 0; (var) < (fp)->max_cos; (var)++) + +/* skip rx queue + * if FCOE l2 support is disabled and this is the fcoe L2 queue + */ +#define skip_rx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) + +/* skip tx queue + * if FCOE l2 support is disabled and this is the fcoe L2 queue + */ +#define skip_tx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) + +#define skip_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) + + + + +/** + * bnx2x_set_mac_one - configure a single MAC address + * + * @bp: driver handle + * @mac: MAC to configure + * @obj: MAC object handle + * @set: if 'true' add a new MAC, otherwise delete it + * @mac_type: the type of the MAC to configure (e.g. ETH, UC list) + * @ramrod_flags: RAMROD_XXX flags (e.g. RAMROD_CONT, RAMROD_COMP_WAIT) + * + * Configures one MAC according to provided parameters or continues the + * execution of previously scheduled commands if RAMROD_CONT is set in + * ramrod_flags. + * + * Returns zero if the operation completed successfully, a positive value + * if it was successfully scheduled, and a negative value if the requested + * operation failed. + */ +int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, + struct bnx2x_vlan_mac_obj *obj, bool set, + int mac_type, unsigned long *ramrod_flags); +/** + * Deletes all MACs configured for the specific MAC object. + * + * @param bp Function driver instance + * @param mac_obj MAC object to cleanup + * + * @return zero if all MACs were cleaned + */ + +/** + * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object + * + * @bp: driver handle + * @mac_obj: MAC object handle + * @mac_type: type of the MACs to clear (BNX2X_XXX_MAC) + * @wait_for_comp: if 'true' block until completion + * + * Deletes all MACs of the specific type (e.g. ETH, UC list). + * + * Returns zero if the operation completed successfully, a positive value + * if it was successfully scheduled, and a negative value if the requested + * operation failed.
+ */ +int bnx2x_del_all_macs(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *mac_obj, + int mac_type, bool wait_for_comp); + +/* Init Function API */ +void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p); +int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port); +int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); +int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode); +int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); +void bnx2x_read_mf_cfg(struct bnx2x *bp); + + +/* dmae */ +void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32); +void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, + u32 len32); +void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx); +u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type); +u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode); +u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type, + bool with_comp, u8 comp_type); + + +void bnx2x_calc_fc_adv(struct bnx2x *bp); +int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, + u32 data_hi, u32 data_lo, int cmd_type); +void bnx2x_update_coalesce(struct bnx2x *bp); +int bnx2x_get_cur_phy_idx(struct bnx2x *bp); + +static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, + int wait) +{ + u32 val; + + do { + val = REG_RD(bp, reg); + if (val == expected) + break; + ms -= wait; + msleep(wait); + + } while (ms > 0); + + return val; +} + +#define BNX2X_ILT_ZALLOC(x, y, size) \ + do { \ + x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ + if (x) \ + memset(x, 0, size); \ + } while (0) + +#define BNX2X_ILT_FREE(x, y, size) \ + do { \ + if (x) { \ + dma_free_coherent(&bp->pdev->dev, size, x, y); \ + x = NULL; \ + y = 0; \ + } \ + } while (0) + +#define ILOG2(x) (ilog2((x))) + +#define ILT_NUM_PAGE_ENTRIES (3072) +/* In 57710/11 we use the whole table since we have 8 funcs. + * In 57712 we have only 4 funcs, but use the same size per func; then only + * half of the table is in use + */ +#define ILT_PER_FUNC (ILT_NUM_PAGE_ENTRIES/8) + +#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC) +/* + * the phys address is shifted right 12 bits and has a valid bit (1) + * added to the 53rd bit; + * then, since this is a wide register(TM), + * we split it into two 32 bit writes + */ +#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF)) +#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44))) + +/* load/unload mode */ +#define LOAD_NORMAL 0 +#define LOAD_OPEN 1 +#define LOAD_DIAG 2 +#define UNLOAD_NORMAL 0 +#define UNLOAD_CLOSE 1 +#define UNLOAD_RECOVERY 2 + + +/* DMAE command defines */ +#define DMAE_TIMEOUT -1 +#define DMAE_PCI_ERROR -2 /* E2 and onward */ +#define DMAE_NOT_RDY -3 +#define DMAE_PCI_ERR_FLAG 0x80000000 + +#define DMAE_SRC_PCI 0 +#define DMAE_SRC_GRC 1 + +#define DMAE_DST_NONE 0 +#define DMAE_DST_PCI 1 +#define DMAE_DST_GRC 2 + +#define DMAE_COMP_PCI 0 +#define DMAE_COMP_GRC 1 + +/* E2 and onward - PCI error handling in the completion */ + +#define DMAE_COMP_REGULAR 0 +#define DMAE_COM_SET_ERR 1 + +#define DMAE_CMD_SRC_PCI (DMAE_SRC_PCI << \ + DMAE_COMMAND_SRC_SHIFT) +#define DMAE_CMD_SRC_GRC (DMAE_SRC_GRC << \ + DMAE_COMMAND_SRC_SHIFT) + +#define DMAE_CMD_DST_PCI (DMAE_DST_PCI << \ + DMAE_COMMAND_DST_SHIFT) +#define DMAE_CMD_DST_GRC (DMAE_DST_GRC << \ + DMAE_COMMAND_DST_SHIFT) + +#define DMAE_CMD_C_DST_PCI (DMAE_COMP_PCI << \ + DMAE_COMMAND_C_DST_SHIFT) +#define DMAE_CMD_C_DST_GRC (DMAE_COMP_GRC << \ + DMAE_COMMAND_C_DST_SHIFT) + +#define
DMAE_CMD_C_ENABLE DMAE_COMMAND_C_TYPE_ENABLE + +#define DMAE_CMD_ENDIANITY_NO_SWAP (0 << DMAE_COMMAND_ENDIANITY_SHIFT) +#define DMAE_CMD_ENDIANITY_B_SWAP (1 << DMAE_COMMAND_ENDIANITY_SHIFT) +#define DMAE_CMD_ENDIANITY_DW_SWAP (2 << DMAE_COMMAND_ENDIANITY_SHIFT) +#define DMAE_CMD_ENDIANITY_B_DW_SWAP (3 << DMAE_COMMAND_ENDIANITY_SHIFT) + +#define DMAE_CMD_PORT_0 0 +#define DMAE_CMD_PORT_1 DMAE_COMMAND_PORT + +#define DMAE_CMD_SRC_RESET DMAE_COMMAND_SRC_RESET +#define DMAE_CMD_DST_RESET DMAE_COMMAND_DST_RESET +#define DMAE_CMD_E1HVN_SHIFT DMAE_COMMAND_E1HVN_SHIFT + +#define DMAE_SRC_PF 0 +#define DMAE_SRC_VF 1 + +#define DMAE_DST_PF 0 +#define DMAE_DST_VF 1 + +#define DMAE_C_SRC 0 +#define DMAE_C_DST 1 + +#define DMAE_LEN32_RD_MAX 0x80 +#define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000) + +#define DMAE_COMP_VAL 0x60d0d0ae /* E2 and on - upper bit + indicates error */ + +#define MAX_DMAE_C_PER_PORT 8 +#define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ + BP_VN(bp)) +#define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ + E1HVN_MAX) + +/* PCIE link and speed */ +#define PCICFG_LINK_WIDTH 0x1f00000 +#define PCICFG_LINK_WIDTH_SHIFT 20 +#define PCICFG_LINK_SPEED 0xf0000 +#define PCICFG_LINK_SPEED_SHIFT 16 + + +#define BNX2X_NUM_TESTS 7 + +#define BNX2X_PHY_LOOPBACK 0 +#define BNX2X_MAC_LOOPBACK 1 +#define BNX2X_PHY_LOOPBACK_FAILED 1 +#define BNX2X_MAC_LOOPBACK_FAILED 2 +#define BNX2X_LOOPBACK_FAILED (BNX2X_MAC_LOOPBACK_FAILED | \ + BNX2X_PHY_LOOPBACK_FAILED) + + +#define STROM_ASSERT_ARRAY_SIZE 50 + + +/* must be used on a CID before placing it on a HW ring */ +#define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \ + (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \ + (x)) + +#define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe)) +#define MAX_SP_DESC_CNT (SP_DESC_CNT - 1) + + +#define BNX2X_BTR 4 +#define MAX_SPQ_PENDING 8 + +/* CMNG constants, as derived from system spec calculations */ +/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */ +#define DEF_MIN_RATE 100 +/* resolution of the rate shaping timer - 400 usec */ +#define RS_PERIODIC_TIMEOUT_USEC 400 +/* number of bytes in single QM arbitration cycle - + * coefficient for calculating the fairness timer */ +#define QM_ARB_BYTES 160000 +/* resolution of Min algorithm 1:100 */ +#define MIN_RES 100 +/* how many bytes above threshold for the minimal credit of Min algorithm */ +#define MIN_ABOVE_THRESH 32768 +/* Fairness algorithm integration time coefficient - + * for calculating the actual Tfair */ +#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES) +/* Memory of fairness algorithm:
2 cycles */ +#define FAIR_MEM 2 + + +#define ATTN_NIG_FOR_FUNC (1L << 8) +#define ATTN_SW_TIMER_4_FUNC (1L << 9) +#define GPIO_2_FUNC (1L << 10) +#define GPIO_3_FUNC (1L << 11) +#define GPIO_4_FUNC (1L << 12) +#define ATTN_GENERAL_ATTN_1 (1L << 13) +#define ATTN_GENERAL_ATTN_2 (1L << 14) +#define ATTN_GENERAL_ATTN_3 (1L << 15) +#define ATTN_GENERAL_ATTN_4 (1L << 13) +#define ATTN_GENERAL_ATTN_5 (1L << 14) +#define ATTN_GENERAL_ATTN_6 (1L << 15) + +#define ATTN_HARD_WIRED_MASK 0xff00 +#define ATTENTION_ID 4 + + +/* stuff added to make the code fit 80Col */ + +#define BNX2X_PMF_LINK_ASSERT \ + GENERAL_ATTEN_OFFSET(LINK_SYNC_ATTENTION_BIT_FUNC_0 + BP_FUNC(bp)) + +#define BNX2X_MC_ASSERT_BITS \ + (GENERAL_ATTEN_OFFSET(TSTORM_FATAL_ASSERT_ATTENTION_BIT) | \ + GENERAL_ATTEN_OFFSET(USTORM_FATAL_ASSERT_ATTENTION_BIT) | \ + GENERAL_ATTEN_OFFSET(CSTORM_FATAL_ASSERT_ATTENTION_BIT) | \ + GENERAL_ATTEN_OFFSET(XSTORM_FATAL_ASSERT_ATTENTION_BIT)) + +#define BNX2X_MCP_ASSERT \ + GENERAL_ATTEN_OFFSET(MCP_FATAL_ASSERT_ATTENTION_BIT) + +#define BNX2X_GRC_TIMEOUT GENERAL_ATTEN_OFFSET(LATCHED_ATTN_TIMEOUT_GRC) +#define BNX2X_GRC_RSV (GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCR) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCT) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCN) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCU) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \ + GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC)) + +#define HW_INTERRUT_ASSERT_SET_0 \ + (AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT) +#define HW_PRTY_ASSERT_SET_0 (AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR) +#define HW_INTERRUT_ASSERT_SET_1 \ + (AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT) +#define HW_PRTY_ASSERT_SET_1 (AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR) +#define HW_INTERRUT_ASSERT_SET_2 \ + (AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \ + AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT |\ + AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT) +#define HW_PRTY_ASSERT_SET_2 
(AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR |\ + AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR) + +#define HW_PRTY_ASSERT_SET_3 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY) + +#define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \ + AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR) + +#define RSS_FLAGS(bp) \ + (TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | \ + (bp->multi_mode << \ + TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT)) +#define MULTI_MASK 0x7f + + +#define DEF_USB_FUNC_OFF offsetof(struct cstorm_def_status_block_u, func) +#define DEF_CSB_FUNC_OFF offsetof(struct cstorm_def_status_block_c, func) +#define DEF_XSB_FUNC_OFF offsetof(struct xstorm_def_status_block, func) +#define DEF_TSB_FUNC_OFF offsetof(struct tstorm_def_status_block, func) + +#define DEF_USB_IGU_INDEX_OFF \ + offsetof(struct cstorm_def_status_block_u, igu_index) +#define DEF_CSB_IGU_INDEX_OFF \ + offsetof(struct cstorm_def_status_block_c, igu_index) +#define DEF_XSB_IGU_INDEX_OFF \ + offsetof(struct xstorm_def_status_block, igu_index) +#define DEF_TSB_IGU_INDEX_OFF \ + offsetof(struct tstorm_def_status_block, igu_index) + +#define DEF_USB_SEGMENT_OFF \ + offsetof(struct cstorm_def_status_block_u, segment) +#define DEF_CSB_SEGMENT_OFF \ + offsetof(struct cstorm_def_status_block_c, segment) +#define DEF_XSB_SEGMENT_OFF \ + offsetof(struct xstorm_def_status_block, segment) +#define DEF_TSB_SEGMENT_OFF \ + offsetof(struct tstorm_def_status_block, segment) + +#define BNX2X_SP_DSB_INDEX \ + (&bp->def_status_blk->sp_sb.\ + index_values[HC_SP_INDEX_ETH_DEF_CONS]) + +#define SET_FLAG(value, mask, flag) \ + do {\ + (value) &= ~(mask);\ + (value) |= ((flag) << (mask##_SHIFT));\ + } while (0) + +#define GET_FLAG(value, mask) \ + (((value) & (mask)) >> (mask##_SHIFT)) + +#define GET_FIELD(value, fname) \ + (((value) & (fname##_MASK)) >> (fname##_SHIFT)) + +#define CAM_IS_INVALID(x) \ + (GET_FLAG(x.flags, \ + MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \ + (T_ETH_MAC_COMMAND_INVALIDATE)) + +/* Number of u32 elements in MC hash array */ +#define MC_HASH_SIZE 8 +#define MC_HASH_OFFSET(bp, i) (BAR_TSTRORM_INTMEM + \ + TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(BP_FUNC(bp)) + i*4) + + +#ifndef PXP2_REG_PXP2_INT_STS +#define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0 +#endif + +#ifndef ETH_MAX_RX_CLIENTS_E2 +#define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H +#endif + +#define BNX2X_VPD_LEN 128 +#define VENDOR_ID_LEN 4 + +/* Congestion management fairness mode */ +#define CMNG_FNS_NONE 0 +#define CMNG_FNS_MINMAX 1 + +#define HC_SEG_ACCESS_DEF 0 /*Driver decision 0-3*/ +#define HC_SEG_ACCESS_ATTN 4 +#define HC_SEG_ACCESS_NORM 0 /*Driver decision 0-1*/ + +static const u32 dmae_reg_go_c[] = { + DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3, + DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7, + DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11, + 
DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15 +}; + +void bnx2x_set_ethtool_ops(struct net_device *netdev); +void bnx2x_notify_link_changed(struct bnx2x *bp); +#endif /* bnx2x.h */ diff --cc drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 5b1f9b5ec499,000000000000..283d663da180 mode 100644,000000..100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@@ -1,1491 -1,0 +1,1491 @@@ +/* bnx2x_cmn.h: Broadcom Everest network driver. + * + * Copyright (c) 2007-2011 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Maintained by: Eilon Greenstein + * Written by: Eliezer Tamir + * Based on code from Michael Chan's bnx2 driver + * UDP CSUM errata workaround by Arik Gendelman + * Slowpath and fastpath rework by Vladislav Zolotarov + * Statistics and Link management by Yitchak Gertner + * + */ +#ifndef BNX2X_CMN_H +#define BNX2X_CMN_H + +#include +#include +#include + + +#include "bnx2x.h" + +/* This is used as a replacement for an MCP if it's not present */ +extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */ + +extern int num_queues; + +/************************ Macros ********************************/ +#define BNX2X_PCI_FREE(x, y, size) \ + do { \ + if (x) { \ + dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \ + x = NULL; \ + y = 0; \ + } \ + } while (0) + +#define BNX2X_FREE(x) \ + do { \ + if (x) { \ + kfree((void *)x); \ + x = NULL; \ + } \ + } while (0) + +#define BNX2X_PCI_ALLOC(x, y, size) \ + do { \ + x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ + if (x == NULL) \ + goto alloc_mem_err; \ + memset((void *)x, 0, size); \ + } while (0) + +#define BNX2X_ALLOC(x, size) \ + do { \ + x = kzalloc(size, GFP_KERNEL); \ + if (x == NULL) \ + goto alloc_mem_err; \ + } while (0) + +/*********************** Interfaces **************************** + * Functions that need to be implemented by each driver version + */ +/* Init */ + +/** + * bnx2x_send_unload_req - request unload mode from the MCP. + * + * @bp: driver handle + * @unload_mode: requested function's unload mode + * + * Return unload mode returned by the MCP: COMMON, PORT or FUNC. + */ +u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode); + +/** + * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP. + * + * @bp: driver handle + */ +void bnx2x_send_unload_done(struct bnx2x *bp); + +/** + * bnx2x_config_rss_pf - configure RSS parameters. + * + * @bp: driver handle + * @ind_table: indirection table to configure + * @config_hash: re-configure RSS hash keys configuration + */ +int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash); + +/** + * bnx2x__init_func_obj - init function object + * + * @bp: driver handle + * + * Initializes the Function Object with the appropriate + * parameters which include a function slow path driver + * interface. + */ +void bnx2x__init_func_obj(struct bnx2x *bp); + +/** + * bnx2x_setup_queue - setup eth queue. + * + * @bp: driver handle + * @fp: pointer to the fastpath structure + * @leading: boolean + * + */ +int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, + bool leading); + +/** + * bnx2x_setup_leading - bring up a leading eth queue. 
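+ * (The leading queue is the first fastpath queue; its client id also + * anchors the multicast and RSS configuration objects, see + * bnx2x_init_bp_objs().)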
+ * + * @bp: driver handle + */ +int bnx2x_setup_leading(struct bnx2x *bp); + +/** + * bnx2x_fw_command - send the MCP a request + * + * @bp: driver handle + * @command: request + * @param: request's parameter + * + * blocks until there is a reply + */ +u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param); + +/** + * bnx2x_initial_phy_init - initialize link parameters structure variables. + * + * @bp: driver handle + * @load_mode: current mode + */ +u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode); + +/** + * bnx2x_link_set - configure hw according to link parameters structure. + * + * @bp: driver handle + */ +void bnx2x_link_set(struct bnx2x *bp); + +/** + * bnx2x_link_test - query link status. + * + * @bp: driver handle + * @is_serdes: bool + * + * Returns 0 if link is UP. + */ +u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes); + +/** + * bnx2x_drv_pulse - write driver pulse to shmem + * + * @bp: driver handle + * + * writes the value in bp->fw_drv_pulse_wr_seq to drv_pulse mbox + * in the shmem. + */ +void bnx2x_drv_pulse(struct bnx2x *bp); + +/** + * bnx2x_igu_ack_sb - update IGU with current SB value + * + * @bp: driver handle + * @igu_sb_id: SB id + * @segment: SB segment + * @index: SB index + * @op: SB operation + * @update: is HW update required + */ +void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, + u16 index, u8 op, u8 update); + +/* Disable transactions from chip to host */ +void bnx2x_pf_disable(struct bnx2x *bp); + +/** + * bnx2x__link_status_update - handles link status change. + * + * @bp: driver handle + */ +void bnx2x__link_status_update(struct bnx2x *bp); + +/** + * bnx2x_link_report - report link status to upper layer. + * + * @bp: driver handle + */ +void bnx2x_link_report(struct bnx2x *bp); + +/* Non-atomic version of bnx2x_link_report() */ +void __bnx2x_link_report(struct bnx2x *bp); + +/** + * bnx2x_get_mf_speed - calculate MF speed. + * + * @bp: driver handle + * + * Takes into account current linespeed and MF configuration. + */ +u16 bnx2x_get_mf_speed(struct bnx2x *bp); + +/** + * bnx2x_msix_sp_int - MSI-X slowpath interrupt handler + * + * @irq: irq number + * @dev_instance: private instance + */ +irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance); + +/** + * bnx2x_interrupt - non MSI-X interrupt handler + * + * @irq: irq number + * @dev_instance: private instance + */ +irqreturn_t bnx2x_interrupt(int irq, void *dev_instance); +#ifdef BCM_CNIC + +/** + * bnx2x_cnic_notify - send command to cnic driver + * + * @bp: driver handle + * @cmd: command + */ +int bnx2x_cnic_notify(struct bnx2x *bp, int cmd); + +/** + * bnx2x_setup_cnic_irq_info - provides cnic with IRQ information + * + * @bp: driver handle + */ +void bnx2x_setup_cnic_irq_info(struct bnx2x *bp); +#endif + +/** + * bnx2x_int_enable - enable HW interrupts. + * + * @bp: driver handle + */ +void bnx2x_int_enable(struct bnx2x *bp); + +/** + * bnx2x_int_disable_sync - disable interrupts. + * + * @bp: driver handle + * @disable_hw: true, disable HW interrupts. + * + * This function ensures that there are no + * ISRs or SP DPCs (sp_task) running after it returns. + */ +void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw); + +/** + * bnx2x_nic_init - init driver internals. + * + * @bp: driver handle + * @load_code: COMMON, PORT or FUNCTION + * + * Initializes: + * - rings + * - status blocks + * - etc. + */ +void bnx2x_nic_init(struct bnx2x *bp, u32 load_code); + +/** + * bnx2x_alloc_mem - allocate driver's memory.
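+ * (Covers slowpath and status-block memory; per-queue fastpath + * memory is allocated separately via bnx2x_alloc_fp_mem().)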
+ * + * @bp: driver handle + */ +int bnx2x_alloc_mem(struct bnx2x *bp); + +/** + * bnx2x_free_mem - release driver's memory. + * + * @bp: driver handle + */ +void bnx2x_free_mem(struct bnx2x *bp); + +/** + * bnx2x_set_num_queues - set number of queues according to mode. + * + * @bp: driver handle + */ +void bnx2x_set_num_queues(struct bnx2x *bp); + +/** + * bnx2x_chip_cleanup - cleanup chip internals. + * + * @bp: driver handle + * @unload_mode: COMMON, PORT, FUNCTION + * + * - Cleanup MAC configuration. + * - Closes clients. + * - etc. + */ +void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode); + +/** + * bnx2x_acquire_hw_lock - acquire HW lock. + * + * @bp: driver handle + * @resource: resource bit which was locked + */ +int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource); + +/** + * bnx2x_release_hw_lock - release HW lock. + * + * @bp: driver handle + * @resource: resource bit which was locked + */ +int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource); + +/** + * bnx2x_release_leader_lock - release recovery leader lock + * + * @bp: driver handle + */ +int bnx2x_release_leader_lock(struct bnx2x *bp); + +/** + * bnx2x_set_eth_mac - configure eth MAC address in the HW + * + * @bp: driver handle + * @set: set or clear + * + * Configures according to the value in netdev->dev_addr. + */ +int bnx2x_set_eth_mac(struct bnx2x *bp, bool set); + +/** + * bnx2x_set_rx_mode - set MAC filtering configurations. + * + * @dev: netdevice + * + * called with netif_tx_lock from dev_mcast.c + * If bp->state is OPEN, should be called with + * netif_addr_lock_bh() + */ +void bnx2x_set_rx_mode(struct net_device *dev); + +/** + * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW. + * + * @bp: driver handle + * + * If bp->state is OPEN, should be called with + * netif_addr_lock_bh(). + */ +void bnx2x_set_storm_rx_mode(struct bnx2x *bp); + +/** + * bnx2x_set_q_rx_mode - configures rx_mode for a single queue. + * + * @bp: driver handle + * @cl_id: client id + * @rx_mode_flags: rx mode configuration + * @rx_accept_flags: rx accept configuration + * @tx_accept_flags: tx accept configuration (tx switch) + * @ramrod_flags: ramrod configuration + */ +void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id, + unsigned long rx_mode_flags, + unsigned long rx_accept_flags, + unsigned long tx_accept_flags, + unsigned long ramrod_flags); + +/* Parity errors related */ +void bnx2x_inc_load_cnt(struct bnx2x *bp); +u32 bnx2x_dec_load_cnt(struct bnx2x *bp); +bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print); +bool bnx2x_reset_is_done(struct bnx2x *bp, int engine); +void bnx2x_set_reset_in_progress(struct bnx2x *bp); +void bnx2x_set_reset_global(struct bnx2x *bp); +void bnx2x_disable_close_the_gate(struct bnx2x *bp); + +/** + * bnx2x_sp_event - handle ramrods completion. + * + * @fp: fastpath handle for the event + * @rr_cqe: eth_rx_cqe + */ +void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe); + +/** + * bnx2x_ilt_set_info - prepare ILT configurations. + * + * @bp: driver handle + */ +void bnx2x_ilt_set_info(struct bnx2x *bp); + +/** + * bnx2x_dcbx_init - initialize dcbx protocol. + * + * @bp: driver handle + */ +void bnx2x_dcbx_init(struct bnx2x *bp); + +/** + * bnx2x_set_power_state - set power state to the requested value. + * + * @bp: driver handle + * @state: required state D0 or D3hot + * + * Currently only D0 and D3hot are supported. 
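+ * (Used by the suspend/resume callbacks declared below.)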
+ */ +int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state); + +/** + * bnx2x_update_max_mf_config - update MAX part of MF configuration in HW. + * + * @bp: driver handle + * @value: new value + */ +void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value); +/* Error handling */ +void bnx2x_panic_dump(struct bnx2x *bp); + +void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl); + +/* dev_close main block */ +int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode); + +/* dev_open main block */ +int bnx2x_nic_load(struct bnx2x *bp, int load_mode); + +/* hard_xmit callback */ +netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev); + +/* setup_tc callback */ +int bnx2x_setup_tc(struct net_device *dev, u8 num_tc); + +/* select_queue callback */ +u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb); + +/* reload helper */ +int bnx2x_reload_if_running(struct net_device *dev); + +int bnx2x_change_mac_addr(struct net_device *dev, void *p); + +/* NAPI poll Rx part */ +int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget); + +void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp, + u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod); + +/* NAPI poll Tx part */ +int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata); + +/* suspend/resume callbacks */ +int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state); +int bnx2x_resume(struct pci_dev *pdev); + +/* Release IRQ vectors */ +void bnx2x_free_irq(struct bnx2x *bp); + +void bnx2x_free_fp_mem(struct bnx2x *bp); +int bnx2x_alloc_fp_mem(struct bnx2x *bp); +void bnx2x_init_rx_rings(struct bnx2x *bp); +void bnx2x_free_skbs(struct bnx2x *bp); +void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw); +void bnx2x_netif_start(struct bnx2x *bp); + +/** + * bnx2x_enable_msix - set msix configuration. + * + * @bp: driver handle + * + * fills msix_table, requests vectors, updates num_queues + * according to number of available vectors. 
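+ * (When the OS grants fewer vectors than requested, the driver falls + * back to a smaller queue count instead of failing the load.)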
+ */ +int bnx2x_enable_msix(struct bnx2x *bp); + +/** + * bnx2x_enable_msi - request msi mode from OS, update internals accordingly + * + * @bp: driver handle + */ +int bnx2x_enable_msi(struct bnx2x *bp); + +/** + * bnx2x_poll - NAPI callback + * + * @napi: napi structure + * @budget: NAPI budget (max packets to process in one call) + * + */ +int bnx2x_poll(struct napi_struct *napi, int budget); + +/** + * bnx2x_alloc_mem_bp - allocate memories outside main driver structure + * + * @bp: driver handle + */ +int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp); + +/** + * bnx2x_free_mem_bp - release memories outside main driver structure + * + * @bp: driver handle + */ +void bnx2x_free_mem_bp(struct bnx2x *bp); + +/** + * bnx2x_change_mtu - change mtu netdev callback + * + * @dev: net device + * @new_mtu: requested mtu + * + */ +int bnx2x_change_mtu(struct net_device *dev, int new_mtu); + +#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC) +/** + * bnx2x_fcoe_get_wwn - return the requested WWN value for this port + * + * @dev: net_device + * @wwn: output buffer + * @type: WWN type: NETDEV_FCOE_WWNN (node) or NETDEV_FCOE_WWPN (port) + * + */ +int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type); +#endif +u32 bnx2x_fix_features(struct net_device *dev, u32 features); +int bnx2x_set_features(struct net_device *dev, u32 features); + +/** + * bnx2x_tx_timeout - tx timeout netdev callback + * + * @dev: net device + */ +void bnx2x_tx_timeout(struct net_device *dev); + +/*********************** Inlines **********************************/ +/*********************** Fast path ********************************/ +static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) +{ + barrier(); /* status block is written to by the chip */ + fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID]; +} + +static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp, + struct bnx2x_fastpath *fp, u16 bd_prod, + u16 rx_comp_prod, u16 rx_sge_prod, u32 start) +{ + struct ustorm_eth_rx_producers rx_prods = {0}; + u32 i; + + /* Update producers */ + rx_prods.bd_prod = bd_prod; + rx_prods.cqe_prod = rx_comp_prod; + rx_prods.sge_prod = rx_sge_prod; + + /* + * Make sure that the BD and SGE data is updated before updating the + * producers since FW might read the BD/SGE right after the producer + * is updated. + * This is only applicable for weak-ordered memory model archs such + * as IA-64. The following barrier is also mandatory since the FW + * assumes BDs must have buffers.
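+ * (The wmb() below publishes the BD/SGE writes before the producer + * registers are written; the mmiowb() after the write loop keeps the + * producer updates ordered on their way to the device.)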
+ */ + wmb(); + + for (i = 0; i < sizeof(rx_prods)/4; i++) + REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]); + + mmiowb(); /* keep prod updates ordered */ + + DP(NETIF_MSG_RX_STATUS, + "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n", + fp->index, bd_prod, rx_comp_prod, rx_sge_prod); +} + +static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id, + u8 segment, u16 index, u8 op, + u8 update, u32 igu_addr) +{ + struct igu_regular cmd_data = {0}; + + cmd_data.sb_id_and_flags = + ((index << IGU_REGULAR_SB_INDEX_SHIFT) | + (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) | + (update << IGU_REGULAR_BUPDATE_SHIFT) | + (op << IGU_REGULAR_ENABLE_INT_SHIFT)); + + DP(NETIF_MSG_HW, "write 0x%08x to IGU addr 0x%x\n", + cmd_data.sb_id_and_flags, igu_addr); + REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags); + + /* Make sure that ACK is written */ + mmiowb(); + barrier(); +} + +static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, + u8 idu_sb_id, bool is_Pf) +{ + u32 data, ctl, cnt = 100; + u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; + u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; + u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; + u32 sb_bit = 1 << (idu_sb_id%32); + u32 func_encode = func | + ((is_Pf == true ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT); + u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; + + /* Not supported in BC mode */ + if (CHIP_INT_MODE_IS_BC(bp)) + return; + + data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup + << IGU_REGULAR_CLEANUP_TYPE_SHIFT) | + IGU_REGULAR_CLEANUP_SET | + IGU_REGULAR_BCLEANUP; + + ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT | + func_encode << IGU_CTRL_REG_FID_SHIFT | + IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT; + + DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", + data, igu_addr_data); + REG_WR(bp, igu_addr_data, data); + mmiowb(); + barrier(); + DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", + ctl, igu_addr_ctl); + REG_WR(bp, igu_addr_ctl, ctl); + mmiowb(); + barrier(); + + /* wait for clean up to finish */ + while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt) + msleep(20); + + + if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) { + DP(NETIF_MSG_HW, "Unable to finish IGU cleanup: " + "idu_sb_id %d offset %d bit %d (cnt %d)\n", + idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt); + } +} + +static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id, + u8 storm, u16 index, u8 op, u8 update) +{ + u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + + COMMAND_REG_INT_ACK); + struct igu_ack_register igu_ack; + + igu_ack.status_block_index = index; + igu_ack.sb_id_and_flags = + ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) | + (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) | + (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | + (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); + + DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n", + (*(u32 *)&igu_ack), hc_addr); + REG_WR(bp, hc_addr, (*(u32 *)&igu_ack)); + + /* Make sure that ACK is written */ + mmiowb(); + barrier(); +} + +static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm, + u16 index, u8 op, u8 update) +{ + if (bp->common.int_block == INT_BLOCK_HC) + bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update); + else { + u8 segment; + + if (CHIP_INT_MODE_IS_BC(bp)) + segment = storm; + else if (igu_sb_id != bp->igu_dsb_id) + segment = IGU_SEG_ACCESS_DEF; + else if (storm == ATTENTION_ID) + segment = IGU_SEG_ACCESS_ATTN; + else + segment = IGU_SEG_ACCESS_DEF; + bnx2x_igu_ack_sb(bp, igu_sb_id, segment, 
index, op, update); + } +} + +static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp) +{ + u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + + COMMAND_REG_SIMD_MASK); + u32 result = REG_RD(bp, hc_addr); + + DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n", + result, hc_addr); + + barrier(); + return result; +} + +static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp) +{ + u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8); + u32 result = REG_RD(bp, igu_addr); + + DP(NETIF_MSG_HW, "read 0x%08x from IGU addr 0x%x\n", + result, igu_addr); + + barrier(); + return result; +} + +static inline u16 bnx2x_ack_int(struct bnx2x *bp) +{ + barrier(); + if (bp->common.int_block == INT_BLOCK_HC) + return bnx2x_hc_ack_int(bp); + else + return bnx2x_igu_ack_int(bp); +} + +static inline int bnx2x_has_tx_work_unload(struct bnx2x_fp_txdata *txdata) +{ + /* Tell compiler that consumer and producer can change */ + barrier(); + return txdata->tx_pkt_prod != txdata->tx_pkt_cons; +} + +static inline u16 bnx2x_tx_avail(struct bnx2x *bp, + struct bnx2x_fp_txdata *txdata) +{ + s16 used; + u16 prod; + u16 cons; + + prod = txdata->tx_bd_prod; + cons = txdata->tx_bd_cons; + + /* NUM_TX_RINGS = number of "next-page" entries + It will be used as a threshold */ + used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS; + +#ifdef BNX2X_STOP_ON_ERROR + WARN_ON(used < 0); + WARN_ON(used > bp->tx_ring_size); + WARN_ON((bp->tx_ring_size - used) > MAX_TX_AVAIL); +#endif + + return (s16)(bp->tx_ring_size) - used; +} + +static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata) +{ + u16 hw_cons; + + /* Tell compiler that status block fields can change */ + barrier(); + hw_cons = le16_to_cpu(*txdata->tx_cons_sb); + return hw_cons != txdata->tx_pkt_cons; +} + +static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp) +{ + u8 cos; + for_each_cos_in_tx_queue(fp, cos) + if (bnx2x_tx_queue_has_work(&fp->txdata[cos])) + return true; + return false; +} + +static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp) +{ + u16 rx_cons_sb; + + /* Tell compiler that status block fields can change */ + barrier(); + rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb); + if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) + rx_cons_sb++; + return (fp->rx_comp_cons != rx_cons_sb); +} + +/** + * bnx2x_tx_disable - disables tx from stack point of view + * + * @bp: driver handle + */ +static inline void bnx2x_tx_disable(struct bnx2x *bp) +{ + netif_tx_disable(bp->dev); + netif_carrier_off(bp->dev); +} + +static inline void bnx2x_free_rx_sge(struct bnx2x *bp, + struct bnx2x_fastpath *fp, u16 index) +{ + struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; + struct page *page = sw_buf->page; + struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; + + /* Skip "next page" elements */ + if (!page) + return; + + dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping), + SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); + __free_pages(page, PAGES_PER_SGE_SHIFT); + + sw_buf->page = NULL; + sge->addr_hi = 0; + sge->addr_lo = 0; +} + +static inline void bnx2x_add_all_napi(struct bnx2x *bp) +{ + int i; + + /* Add NAPI objects */ + for_each_rx_queue(bp, i) + netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), + bnx2x_poll, BNX2X_NAPI_WEIGHT); +} + +static inline void bnx2x_del_all_napi(struct bnx2x *bp) +{ + int i; + + for_each_rx_queue(bp, i) + netif_napi_del(&bnx2x_fp(bp, i, napi)); +} + +static inline void bnx2x_disable_msi(struct bnx2x *bp) +{ + if (bp->flags & USING_MSIX_FLAG) { + pci_disable_msix(bp->pdev); + bp->flags &= 
~USING_MSIX_FLAG; + } else if (bp->flags & USING_MSI_FLAG) { + pci_disable_msi(bp->pdev); + bp->flags &= ~USING_MSI_FLAG; + } +} + +static inline int bnx2x_calc_num_queues(struct bnx2x *bp) +{ + return num_queues ? + min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) : + min_t(int, num_online_cpus(), BNX2X_MAX_QUEUES(bp)); +} + +static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp) +{ + int i, j; + + for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { + int idx = RX_SGE_CNT * i - 1; + + for (j = 0; j < 2; j++) { + BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx); + idx--; + } + } +} + +static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp) +{ + /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */ + memset(fp->sge_mask, 0xff, + (NUM_RX_SGE >> BIT_VEC64_ELEM_SHIFT)*sizeof(u64)); + + /* Clear the two last indices in the page to 1: + these are the indices that correspond to the "next" element, + hence will never be indicated and should be removed from + the calculations. */ + bnx2x_clear_sge_mask_next_elems(fp); +} + +static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp, + struct bnx2x_fastpath *fp, u16 index) +{ + struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT); + struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; + struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; + dma_addr_t mapping; + + if (unlikely(page == NULL)) + return -ENOMEM; + + mapping = dma_map_page(&bp->pdev->dev, page, 0, + SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { + __free_pages(page, PAGES_PER_SGE_SHIFT); + return -ENOMEM; + } + + sw_buf->page = page; + dma_unmap_addr_set(sw_buf, mapping, mapping); + + sge->addr_hi = cpu_to_le32(U64_HI(mapping)); + sge->addr_lo = cpu_to_le32(U64_LO(mapping)); + + return 0; +} + +static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp, + struct bnx2x_fastpath *fp, u16 index) +{ + struct sk_buff *skb; + struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index]; + struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index]; + dma_addr_t mapping; + + skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size); + if (unlikely(skb == NULL)) + return -ENOMEM; + + mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { + dev_kfree_skb_any(skb); + return -ENOMEM; + } + + rx_buf->skb = skb; + dma_unmap_addr_set(rx_buf, mapping, mapping); + + rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); + rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); + + return 0; +} + +/* note that we are not allocating a new skb, + * we are just moving one from cons to prod + * we are not creating a new mapping, + * so there is no need to check for dma_mapping_error(). + */ +static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp, + u16 cons, u16 prod) +{ + struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; + struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod]; + struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons]; + struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod]; + + dma_unmap_addr_set(prod_rx_buf, mapping, + dma_unmap_addr(cons_rx_buf, mapping)); + prod_rx_buf->skb = cons_rx_buf->skb; + *prod_bd = *cons_bd; +} + +/************************* Init ******************************************/ + +/** + * bnx2x_func_start - init function + * + * @bp: driver handle + * + * Must be called before sending CLIENT_SETUP for the first client. 
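+ * (Issues a FUNCTION_START ramrod; since RAMROD_COMP_WAIT is set, + * the call blocks until the FW completes the command.)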
+ */ +static inline int bnx2x_func_start(struct bnx2x *bp) +{ + struct bnx2x_func_state_params func_params = {0}; + struct bnx2x_func_start_params *start_params = + &func_params.params.start; + + /* Prepare parameters for function state transitions */ + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_START; + + /* Function parameters */ + start_params->mf_mode = bp->mf_mode; + start_params->sd_vlan_tag = bp->mf_ov; + if (CHIP_IS_E1x(bp)) + start_params->network_cos_mode = OVERRIDE_COS; + else + start_params->network_cos_mode = STATIC_COS; + + return bnx2x_func_state_change(bp, &func_params); +} + + +/** + * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format + * + * @fw_hi: pointer to upper part + * @fw_mid: pointer to middle part + * @fw_lo: pointer to lower part + * @mac: pointer to MAC address + */ +static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo, + u8 *mac) +{ + ((u8 *)fw_hi)[0] = mac[1]; + ((u8 *)fw_hi)[1] = mac[0]; + ((u8 *)fw_mid)[0] = mac[3]; + ((u8 *)fw_mid)[1] = mac[2]; + ((u8 *)fw_lo)[0] = mac[5]; + ((u8 *)fw_lo)[1] = mac[4]; +} + +static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp, + struct bnx2x_fastpath *fp, int last) +{ + int i; + + if (fp->disable_tpa) + return; + + for (i = 0; i < last; i++) + bnx2x_free_rx_sge(bp, fp, i); +} + +static inline void bnx2x_free_tpa_pool(struct bnx2x *bp, + struct bnx2x_fastpath *fp, int last) +{ + int i; + + for (i = 0; i < last; i++) { + struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i]; + struct sw_rx_bd *first_buf = &tpa_info->first_buf; + struct sk_buff *skb = first_buf->skb; + + if (skb == NULL) { + DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i); + continue; + } + if (tpa_info->tpa_state == BNX2X_TPA_START) + dma_unmap_single(&bp->pdev->dev, + dma_unmap_addr(first_buf, mapping), + fp->rx_buf_size, DMA_FROM_DEVICE); + dev_kfree_skb(skb); + first_buf->skb = NULL; + } +} + +static inline void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata) +{ + int i; + + for (i = 1; i <= NUM_TX_RINGS; i++) { + struct eth_tx_next_bd *tx_next_bd = + &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd; + + tx_next_bd->addr_hi = + cpu_to_le32(U64_HI(txdata->tx_desc_mapping + + BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); + tx_next_bd->addr_lo = + cpu_to_le32(U64_LO(txdata->tx_desc_mapping + + BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); + } + + SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1); + txdata->tx_db.data.zero_fill1 = 0; + txdata->tx_db.data.prod = 0; + + txdata->tx_pkt_prod = 0; + txdata->tx_pkt_cons = 0; + txdata->tx_bd_prod = 0; + txdata->tx_bd_cons = 0; + txdata->tx_pkt = 0; +} + +static inline void bnx2x_init_tx_rings(struct bnx2x *bp) +{ + int i; + u8 cos; + + for_each_tx_queue(bp, i) + for_each_cos_in_tx_queue(&bp->fp[i], cos) + bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]); +} + +static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp) +{ + int i; + + for (i = 1; i <= NUM_RX_RINGS; i++) { + struct eth_rx_bd *rx_bd; + + rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2]; + rx_bd->addr_hi = + cpu_to_le32(U64_HI(fp->rx_desc_mapping + + BCM_PAGE_SIZE*(i % NUM_RX_RINGS))); + rx_bd->addr_lo = + cpu_to_le32(U64_LO(fp->rx_desc_mapping + + BCM_PAGE_SIZE*(i % NUM_RX_RINGS))); + } +} + +static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp) +{ + int i; + + for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { + struct eth_rx_sge *sge; + + sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2]; + sge->addr_hi = + 
cpu_to_le32(U64_HI(fp->rx_sge_mapping + + BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES))); + + sge->addr_lo = + cpu_to_le32(U64_LO(fp->rx_sge_mapping + + BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES))); + } +} + +static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp) +{ + int i; + for (i = 1; i <= NUM_RCQ_RINGS; i++) { + struct eth_rx_cqe_next_page *nextpg; + + nextpg = (struct eth_rx_cqe_next_page *) + &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1]; + nextpg->addr_hi = + cpu_to_le32(U64_HI(fp->rx_comp_mapping + + BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); + nextpg->addr_lo = + cpu_to_le32(U64_LO(fp->rx_comp_mapping + + BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS))); + } +} + +/* Returns the number of actually allocated BDs */ +static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp, + int rx_ring_size) +{ + struct bnx2x *bp = fp->bp; + u16 ring_prod, cqe_ring_prod; + int i; + + fp->rx_comp_cons = 0; + cqe_ring_prod = ring_prod = 0; + + /* This routine is called only during init, so + * fp->eth_q_stats.rx_skb_alloc_failed = 0 + */ + for (i = 0; i < rx_ring_size; i++) { + if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) { + fp->eth_q_stats.rx_skb_alloc_failed++; + continue; + } + ring_prod = NEXT_RX_IDX(ring_prod); + cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod); + WARN_ON(ring_prod <= (i - fp->eth_q_stats.rx_skb_alloc_failed)); + } + + if (fp->eth_q_stats.rx_skb_alloc_failed) + BNX2X_ERR("was only able to allocate " + "%d rx skbs on queue[%d]\n", + (i - fp->eth_q_stats.rx_skb_alloc_failed), fp->index); + + fp->rx_bd_prod = ring_prod; + /* Limit the CQE producer by the CQE ring size */ + fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT, + cqe_ring_prod); + fp->rx_pkt = fp->rx_calls = 0; + + return i - fp->eth_q_stats.rx_skb_alloc_failed; +} + +/* Statistics IDs are global per chip/path, while Client IDs for E1x are per + * port. + */ +static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp) +{ + if (!CHIP_IS_E1x(fp->bp)) + return fp->cl_id; + else + return fp->cl_id + BP_PORT(fp->bp) * FP_SB_MAX_E1x; +} + +static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp, + bnx2x_obj_type obj_type) +{ + struct bnx2x *bp = fp->bp; + + /* Configure classification DBs */ + bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid, + BP_FUNC(bp), bnx2x_sp(bp, mac_rdata), + bnx2x_sp_mapping(bp, mac_rdata), + BNX2X_FILTER_MAC_PENDING, + &bp->sp_state, obj_type, + &bp->macs_pool); +} + +/** + * bnx2x_get_path_func_num - get number of active functions + * + * @bp: driver handle + * + * Calculates the number of active (not hidden) functions on the + * current path. + */ +static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp) +{ + u8 func_num = 0, i; + + /* 57710 has only one function per-port */ + if (CHIP_IS_E1(bp)) + return 1; + + /* Calculate the number of functions enabled on the current + * PATH/PORT. + */ + if (CHIP_REV_IS_SLOW(bp)) { + if (IS_MF(bp)) + func_num = 4; + else + func_num = 2; + } else { + for (i = 0; i < E1H_FUNC_MAX / 2; i++) { + u32 func_config = + MF_CFG_RD(bp, + func_mf_config[BP_PORT(bp) + 2 * i]. + config); + func_num += + ((func_config & FUNC_MF_CFG_FUNC_HIDE) ?
0 : 1); + } + } + + WARN_ON(!func_num); + + return func_num; +} + +static inline void bnx2x_init_bp_objs(struct bnx2x *bp) +{ + /* RX_MODE controlling object */ + bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj); + + /* multicast configuration controlling object */ + bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid, + BP_FUNC(bp), BP_FUNC(bp), + bnx2x_sp(bp, mcast_rdata), + bnx2x_sp_mapping(bp, mcast_rdata), + BNX2X_FILTER_MCAST_PENDING, &bp->sp_state, + BNX2X_OBJ_TYPE_RX); + + /* Setup CAM credit pools */ + bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp), + bnx2x_get_path_func_num(bp)); + + /* RSS configuration object */ + bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id, + bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp), + bnx2x_sp(bp, rss_rdata), + bnx2x_sp_mapping(bp, rss_rdata), + BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state, + BNX2X_OBJ_TYPE_RX); +} + +static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp) +{ + if (CHIP_IS_E1x(fp->bp)) + return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H; + else + return fp->cl_id; +} + +static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp) +{ + struct bnx2x *bp = fp->bp; + + if (!CHIP_IS_E1x(bp)) + return USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); + else + return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id); +} + +static inline void bnx2x_init_txdata(struct bnx2x *bp, + struct bnx2x_fp_txdata *txdata, u32 cid, int txq_index, + __le16 *tx_cons_sb) +{ + txdata->cid = cid; + txdata->txq_index = txq_index; + txdata->tx_cons_sb = tx_cons_sb; + + DP(BNX2X_MSG_SP, "created tx data cid %d, txq %d\n", + txdata->cid, txdata->txq_index); +} + +#ifdef BCM_CNIC +static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx) +{ + return bp->cnic_base_cl_id + cl_idx + - (bp->pf_num >> 1) * NON_ETH_CONTEXT_USE; ++ (bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX; +} + +static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp) +{ + + /* the 'first' id is allocated for the cnic */ + return bp->base_fw_ndsb; +} + +static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp) +{ + return bp->igu_base_sb; +} + + +static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp) +{ + struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); + unsigned long q_type = 0; + + bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp, + BNX2X_FCOE_ETH_CL_ID_IDX); + /* Current BNX2X_FCOE_ETH_CID definition implies not more than + * 16 ETH clients per function when CNIC is enabled! + * + * Fix it ASAP!!!
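+ * (The limit follows from BNX2X_FCOE_ETH_CID being a fixed CID + * rather than one derived from the actual number of eth clients.)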
+ */ + bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID; + bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID; + bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id; + bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX; + + bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]), + fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX); + + DP(BNX2X_MSG_SP, "created fcoe tx data (fp index %d)\n", fp->index); + + /* qZone id equals to FW (per path) client id */ + bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp); + /* init shortcut */ + bnx2x_fcoe(bp, ustorm_rx_prods_offset) = + bnx2x_rx_ustorm_prods_offset(fp); + + /* Configure Queue State object */ + __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); + __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); + + /* No multi-CoS for FCoE L2 client */ + BUG_ON(fp->max_cos != 1); + + bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, &fp->cid, 1, + BP_FUNC(bp), bnx2x_sp(bp, q_rdata), + bnx2x_sp_mapping(bp, q_rdata), q_type); + + DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d " + "igu_sb %d\n", + fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, + fp->igu_sb_id); +} +#endif + +static inline int bnx2x_clean_tx_queue(struct bnx2x *bp, + struct bnx2x_fp_txdata *txdata) +{ + int cnt = 1000; + + while (bnx2x_has_tx_work_unload(txdata)) { + if (!cnt) { + BNX2X_ERR("timeout waiting for queue[%d]: " + "txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n", + txdata->txq_index, txdata->tx_pkt_prod, + txdata->tx_pkt_cons); +#ifdef BNX2X_STOP_ON_ERROR + bnx2x_panic(); + return -EBUSY; +#else + break; +#endif + } + cnt--; + usleep_range(1000, 1000); + } + + return 0; +} + +int bnx2x_get_link_cfg_idx(struct bnx2x *bp); + +static inline void __storm_memset_struct(struct bnx2x *bp, + u32 addr, size_t size, u32 *data) +{ + int i; + for (i = 0; i < size/4; i++) + REG_WR(bp, addr + (i * 4), data[i]); +} + +static inline void storm_memset_func_cfg(struct bnx2x *bp, + struct tstorm_eth_function_common_config *tcfg, + u16 abs_fid) +{ + size_t size = sizeof(struct tstorm_eth_function_common_config); + + u32 addr = BAR_TSTRORM_INTMEM + + TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid); + + __storm_memset_struct(bp, addr, size, (u32 *)tcfg); +} + +static inline void storm_memset_cmng(struct bnx2x *bp, + struct cmng_struct_per_port *cmng, + u8 port) +{ + size_t size = sizeof(struct cmng_struct_per_port); + + u32 addr = BAR_XSTRORM_INTMEM + + XSTORM_CMNG_PER_PORT_VARS_OFFSET(port); + + __storm_memset_struct(bp, addr, size, (u32 *)cmng); +} + +/** + * bnx2x_wait_sp_comp - wait for the outstanding SP commands. + * + * @bp: driver handle + * @mask: bits that need to be cleared + */ +static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask) +{ + int tout = 5000; /* Wait for 5 secs tops */ + + while (tout--) { + smp_mb(); + netif_addr_lock_bh(bp->dev); + if (!(bp->sp_state & mask)) { + netif_addr_unlock_bh(bp->dev); + return true; + } + netif_addr_unlock_bh(bp->dev); + + usleep_range(1000, 1000); + } + + smp_mb(); + + netif_addr_lock_bh(bp->dev); + if (bp->sp_state & mask) { + BNX2X_ERR("Filtering completion timed out. 
sp_state 0x%lx, " + "mask 0x%lx\n", bp->sp_state, mask); + netif_addr_unlock_bh(bp->dev); + return false; + } + netif_addr_unlock_bh(bp->dev); + + return true; +} + +/** + * bnx2x_set_ctx_validation - set CDU context validation values + * + * @bp: driver handle + * @cxt: context of the connection on the host memory + * @cid: SW CID of the connection to be configured + */ +void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, + u32 cid); + +void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id, + u8 sb_index, u8 disable, u16 usec); +void bnx2x_acquire_phy_lock(struct bnx2x *bp); +void bnx2x_release_phy_lock(struct bnx2x *bp); + +/** + * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration. + * + * @bp: driver handle + * @mf_cfg: MF configuration + * + */ +static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg) +{ + u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> + FUNC_MF_CFG_MAX_BW_SHIFT; + if (!max_cfg) { + DP(NETIF_MSG_LINK, + "Max BW configured to 0 - using 100 instead\n"); + max_cfg = 100; + } + return max_cfg; +} + +#endif /* BNX2X_CMN_H */ diff --cc drivers/net/ethernet/broadcom/tg3.c index b89027c61937,000000000000..b865e9fdd089 mode 100644,000000..100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@@ -1,15951 -1,0 +1,15951 @@@ +/* + * tg3.c: Broadcom Tigon3 ethernet driver. + * + * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) + * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com) + * Copyright (C) 2004 Sun Microsystems Inc. + * Copyright (C) 2005-2011 Broadcom Corporation. + * + * Firmware is: + * Derived from proprietary unpublished source code, + * Copyright (C) 2000-2003 Broadcom Corporation. + * + * Permission is hereby granted for the distribution of this firmware + * data in hexadecimal or equivalent format, provided this copyright + * notice is accompanying it. + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +#ifdef CONFIG_SPARC +#include +#include +#endif + +#define BAR_0 0 +#define BAR_2 2 + +#include "tg3.h" + +/* Functions & macros to verify TG3_FLAGS types */ + +static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits) +{ + return test_bit(flag, bits); +} + +static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits) +{ + set_bit(flag, bits); +} + +static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits) +{ + clear_bit(flag, bits); +} + +#define tg3_flag(tp, flag) \ + _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags) +#define tg3_flag_set(tp, flag) \ + _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags) +#define tg3_flag_clear(tp, flag) \ + _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags) + +#define DRV_MODULE_NAME "tg3" +#define TG3_MAJ_NUM 3 +#define TG3_MIN_NUM 120 +#define DRV_MODULE_VERSION \ + __stringify(TG3_MAJ_NUM) "." 
__stringify(TG3_MIN_NUM) +#define DRV_MODULE_RELDATE "August 18, 2011" + +#define RESET_KIND_SHUTDOWN 0 +#define RESET_KIND_INIT 1 +#define RESET_KIND_SUSPEND 2 + +#define TG3_DEF_RX_MODE 0 +#define TG3_DEF_TX_MODE 0 +#define TG3_DEF_MSG_ENABLE \ + (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | \ + NETIF_MSG_IFDOWN | \ + NETIF_MSG_IFUP | \ + NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR) + +#define TG3_GRC_LCLCTL_PWRSW_DELAY 100 + +/* length of time before we decide the hardware is borked, + * and dev->tx_timeout() should be called to fix the problem + */ + +#define TG3_TX_TIMEOUT (5 * HZ) + +/* hardware minimum and maximum for a single frame's data payload */ +#define TG3_MIN_MTU 60 +#define TG3_MAX_MTU(tp) \ + (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500) + +/* These numbers seem to be hard coded in the NIC firmware somehow. + * You can't change the ring sizes, but you can change where you place + * them in the NIC onboard memory. + */ +#define TG3_RX_STD_RING_SIZE(tp) \ + (tg3_flag(tp, LRG_PROD_RING_CAP) ? \ + TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700) +#define TG3_DEF_RX_RING_PENDING 200 +#define TG3_RX_JMB_RING_SIZE(tp) \ + (tg3_flag(tp, LRG_PROD_RING_CAP) ? \ + TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700) +#define TG3_DEF_RX_JUMBO_RING_PENDING 100 +#define TG3_RSS_INDIR_TBL_SIZE 128 + +/* Do not place this n-ring entries value into the tp struct itself, + * we really want to expose these constants to GCC so that modulo et + * al. operations are done with shifts and masks instead of with + * hw multiply/modulo instructions. Another solution would be to + * replace things like '% foo' with '& (foo - 1)'. + */ + +#define TG3_TX_RING_SIZE 512 +#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1) + +#define TG3_RX_STD_RING_BYTES(tp) \ + (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp)) +#define TG3_RX_JMB_RING_BYTES(tp) \ + (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp)) +#define TG3_RX_RCB_RING_BYTES(tp) \ + (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1)) +#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \ + TG3_TX_RING_SIZE) +#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) + +#define TG3_DMA_BYTE_ENAB 64 + +#define TG3_RX_STD_DMA_SZ 1536 +#define TG3_RX_JMB_DMA_SZ 9046 + +#define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB) + +#define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ) +#define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ) + +#define TG3_RX_STD_BUFF_RING_SIZE(tp) \ + (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp)) + +#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \ + (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp)) + +/* Due to a hardware bug, the 5701 can only DMA to memory addresses + * that are at least dword aligned when used in PCIX mode. The driver + * works around this bug by double copying the packet. This workaround + * is built into the normal double copy length check for efficiency. + * + * However, the double copy is only necessary on those architectures + * where unaligned memory accesses are inefficient. For those architectures + * where unaligned memory accesses incur little penalty, we can reintegrate + * the 5701 in the normal rx path. Doing so saves a device structure + * dereference by hardcoding the double copy threshold in place. 
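+ * (TG3_RX_COPY_THRESH() below resolves to the compile-time constant + * TG3_RX_COPY_THRESHOLD on such architectures, and to the run-time + * tunable tp->rx_copy_thresh everywhere else.)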
+ */ +#define TG3_RX_COPY_THRESHOLD 256 +#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD +#else + #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh) +#endif + +#if (NET_IP_ALIGN != 0) +#define TG3_RX_OFFSET(tp) ((tp)->rx_offset) +#else +#define TG3_RX_OFFSET(tp) 0 +#endif + +/* minimum number of free TX descriptors required to wake up TX process */ +#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4) +#define TG3_TX_BD_DMA_MAX 4096 + +#define TG3_RAW_IP_ALIGN 2 + +#define TG3_FW_UPDATE_TIMEOUT_SEC 5 + +#define FIRMWARE_TG3 "tigon/tg3.bin" +#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin" +#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin" + +static char version[] __devinitdata = + DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")"; + +MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)"); +MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); +MODULE_FIRMWARE(FIRMWARE_TG3); +MODULE_FIRMWARE(FIRMWARE_TG3TSO); +MODULE_FIRMWARE(FIRMWARE_TG3TSO5); + +static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */ +module_param(tg3_debug, int, 0); +MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value"); + +static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = { + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 
PCI_DEVICE_ID_TIGON3_5753F)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)}, + {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, + {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, + {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, + {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)}, + {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)}, + {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)}, + {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)}, + {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */ + {} +}; + +MODULE_DEVICE_TABLE(pci, tg3_pci_tbl); + +static const struct { + const char string[ETH_GSTRING_LEN]; +} ethtool_stats_keys[] = { + { "rx_octets" }, + { "rx_fragments" }, + { 
"rx_ucast_packets" }, + { "rx_mcast_packets" }, + { "rx_bcast_packets" }, + { "rx_fcs_errors" }, + { "rx_align_errors" }, + { "rx_xon_pause_rcvd" }, + { "rx_xoff_pause_rcvd" }, + { "rx_mac_ctrl_rcvd" }, + { "rx_xoff_entered" }, + { "rx_frame_too_long_errors" }, + { "rx_jabbers" }, + { "rx_undersize_packets" }, + { "rx_in_length_errors" }, + { "rx_out_length_errors" }, + { "rx_64_or_less_octet_packets" }, + { "rx_65_to_127_octet_packets" }, + { "rx_128_to_255_octet_packets" }, + { "rx_256_to_511_octet_packets" }, + { "rx_512_to_1023_octet_packets" }, + { "rx_1024_to_1522_octet_packets" }, + { "rx_1523_to_2047_octet_packets" }, + { "rx_2048_to_4095_octet_packets" }, + { "rx_4096_to_8191_octet_packets" }, + { "rx_8192_to_9022_octet_packets" }, + + { "tx_octets" }, + { "tx_collisions" }, + + { "tx_xon_sent" }, + { "tx_xoff_sent" }, + { "tx_flow_control" }, + { "tx_mac_errors" }, + { "tx_single_collisions" }, + { "tx_mult_collisions" }, + { "tx_deferred" }, + { "tx_excessive_collisions" }, + { "tx_late_collisions" }, + { "tx_collide_2times" }, + { "tx_collide_3times" }, + { "tx_collide_4times" }, + { "tx_collide_5times" }, + { "tx_collide_6times" }, + { "tx_collide_7times" }, + { "tx_collide_8times" }, + { "tx_collide_9times" }, + { "tx_collide_10times" }, + { "tx_collide_11times" }, + { "tx_collide_12times" }, + { "tx_collide_13times" }, + { "tx_collide_14times" }, + { "tx_collide_15times" }, + { "tx_ucast_packets" }, + { "tx_mcast_packets" }, + { "tx_bcast_packets" }, + { "tx_carrier_sense_errors" }, + { "tx_discards" }, + { "tx_errors" }, + + { "dma_writeq_full" }, + { "dma_write_prioq_full" }, + { "rxbds_empty" }, + { "rx_discards" }, + { "rx_errors" }, + { "rx_threshold_hit" }, + + { "dma_readq_full" }, + { "dma_read_prioq_full" }, + { "tx_comp_queue_full" }, + + { "ring_set_send_prod_index" }, + { "ring_status_update" }, + { "nic_irqs" }, + { "nic_avoided_irqs" }, + { "nic_tx_threshold_hit" }, + + { "mbuf_lwm_thresh_hit" }, +}; + +#define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys) + + +static const struct { + const char string[ETH_GSTRING_LEN]; +} ethtool_test_keys[] = { + { "nvram test (online) " }, + { "link test (online) " }, + { "register test (offline)" }, + { "memory test (offline)" }, + { "mac loopback test (offline)" }, + { "phy loopback test (offline)" }, + { "ext loopback test (offline)" }, + { "interrupt test (offline)" }, +}; + +#define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys) + + +static void tg3_write32(struct tg3 *tp, u32 off, u32 val) +{ + writel(val, tp->regs + off); +} + +static u32 tg3_read32(struct tg3 *tp, u32 off) +{ + return readl(tp->regs + off); +} + +static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val) +{ + writel(val, tp->aperegs + off); +} + +static u32 tg3_ape_read32(struct tg3 *tp, u32 off) +{ + return readl(tp->aperegs + off); +} + +static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) +{ + unsigned long flags; + + spin_lock_irqsave(&tp->indirect_lock, flags); + pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); + pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); + spin_unlock_irqrestore(&tp->indirect_lock, flags); +} + +static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val) +{ + writel(val, tp->regs + off); + readl(tp->regs + off); +} + +static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off) +{ + unsigned long flags; + u32 val; + + spin_lock_irqsave(&tp->indirect_lock, flags); + pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off); + pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, 
&val); + spin_unlock_irqrestore(&tp->indirect_lock, flags); + return val; +} + +static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val) +{ + unsigned long flags; + + if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) { + pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX + + TG3_64BIT_REG_LOW, val); + return; + } + if (off == TG3_RX_STD_PROD_IDX_REG) { + pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX + + TG3_64BIT_REG_LOW, val); + return; + } + + spin_lock_irqsave(&tp->indirect_lock, flags); + pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600); + pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val); + spin_unlock_irqrestore(&tp->indirect_lock, flags); + + /* In indirect mode when disabling interrupts, we also need + * to clear the interrupt bit in the GRC local ctrl register. + */ + if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) && + (val == 0x1)) { + pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL, + tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT); + } +} + +static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off) +{ + unsigned long flags; + u32 val; + + spin_lock_irqsave(&tp->indirect_lock, flags); + pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600); + pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val); + spin_unlock_irqrestore(&tp->indirect_lock, flags); + return val; +} + +/* usec_wait specifies the wait time in usec when writing to certain registers + * where it is unsafe to read back the register without some delay. + * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power. + * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed. + */ +static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait) +{ + if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND)) + /* Non-posted methods */ + tp->write32(tp, off, val); + else { + /* Posted method */ + tg3_write32(tp, off, val); + if (usec_wait) + udelay(usec_wait); + tp->read32(tp, off); + } + /* Wait again after the read for the posted method to guarantee that + * the wait time is met. 
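+ * The read back forces the posted write out of any intermediate
+ * buffers on its way to the device, so delaying again afterwards
+ * ensures the full usec_wait elapses after the write has actually
+ * reached the chip, not merely after it was issued.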
+ */ + if (usec_wait) + udelay(usec_wait); +} + +static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val) +{ + tp->write32_mbox(tp, off, val); + if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND)) + tp->read32_mbox(tp, off); +} + +static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val) +{ + void __iomem *mbox = tp->regs + off; + writel(val, mbox); + if (tg3_flag(tp, TXD_MBOX_HWBUG)) + writel(val, mbox); + if (tg3_flag(tp, MBOX_WRITE_REORDER)) + readl(mbox); +} + +static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off) +{ + return readl(tp->regs + off + GRCMBOX_BASE); +} + +static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val) +{ + writel(val, tp->regs + off + GRCMBOX_BASE); +} + +#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val) +#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val)) +#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val) +#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val) +#define tr32_mailbox(reg) tp->read32_mbox(tp, reg) + +#define tw32(reg, val) tp->write32(tp, reg, val) +#define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0) +#define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us)) +#define tr32(reg) tp->read32(tp, reg) + +static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val) +{ + unsigned long flags; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 && + (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) + return; + + spin_lock_irqsave(&tp->indirect_lock, flags); + if (tg3_flag(tp, SRAM_USE_CONFIG)) { + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); + + /* Always leave this as zero. */ + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); + } else { + tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off); + tw32_f(TG3PCI_MEM_WIN_DATA, val); + + /* Always leave this as zero. */ + tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0); + } + spin_unlock_irqrestore(&tp->indirect_lock, flags); +} + +static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val) +{ + unsigned long flags; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 && + (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) { + *val = 0; + return; + } + + spin_lock_irqsave(&tp->indirect_lock, flags); + if (tg3_flag(tp, SRAM_USE_CONFIG)) { + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off); + pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); + + /* Always leave this as zero. */ + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); + } else { + tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off); + *val = tr32(TG3PCI_MEM_WIN_DATA); + + /* Always leave this as zero. */ + tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0); + } + spin_unlock_irqrestore(&tp->indirect_lock, flags); +} + +static void tg3_ape_lock_init(struct tg3 *tp) +{ + int i; + u32 regbase, bit; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + regbase = TG3_APE_LOCK_GRANT; + else + regbase = TG3_APE_PER_LOCK_GRANT; + + /* Make sure the driver hasn't any stale locks. */ + for (i = 0; i < 8; i++) { + if (i == TG3_APE_LOCK_GPIO) + continue; + tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER); + } + + /* Clear the correct bit of the GPIO lock too. 
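+ * The GPIO lock is shared by all PCI functions, so only this
+ * function's grant bit is released: function 0 owns the generic
+ * driver bit, every other function owns bit (1 << pci_fn).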
*/ + if (!tp->pci_fn) + bit = APE_LOCK_GRANT_DRIVER; + else + bit = 1 << tp->pci_fn; + + tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit); +} + +static int tg3_ape_lock(struct tg3 *tp, int locknum) +{ + int i, off; + int ret = 0; + u32 status, req, gnt, bit; + + if (!tg3_flag(tp, ENABLE_APE)) + return 0; + + switch (locknum) { + case TG3_APE_LOCK_GPIO: + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + return 0; + case TG3_APE_LOCK_GRC: + case TG3_APE_LOCK_MEM: + break; + default: + return -EINVAL; + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) { + req = TG3_APE_LOCK_REQ; + gnt = TG3_APE_LOCK_GRANT; + } else { + req = TG3_APE_PER_LOCK_REQ; + gnt = TG3_APE_PER_LOCK_GRANT; + } + + off = 4 * locknum; + + if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn) + bit = APE_LOCK_REQ_DRIVER; + else + bit = 1 << tp->pci_fn; + + tg3_ape_write32(tp, req + off, bit); + + /* Wait for up to 1 millisecond to acquire lock. */ + for (i = 0; i < 100; i++) { + status = tg3_ape_read32(tp, gnt + off); + if (status == bit) + break; + udelay(10); + } + + if (status != bit) { + /* Revoke the lock request. */ + tg3_ape_write32(tp, gnt + off, bit); + ret = -EBUSY; + } + + return ret; +} + +static void tg3_ape_unlock(struct tg3 *tp, int locknum) +{ + u32 gnt, bit; + + if (!tg3_flag(tp, ENABLE_APE)) + return; + + switch (locknum) { + case TG3_APE_LOCK_GPIO: + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + return; + case TG3_APE_LOCK_GRC: + case TG3_APE_LOCK_MEM: + break; + default: + return; + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + gnt = TG3_APE_LOCK_GRANT; + else + gnt = TG3_APE_PER_LOCK_GRANT; + + if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn) + bit = APE_LOCK_GRANT_DRIVER; + else + bit = 1 << tp->pci_fn; + + tg3_ape_write32(tp, gnt + 4 * locknum, bit); +} + +static void tg3_ape_send_event(struct tg3 *tp, u32 event) +{ + int i; + u32 apedata; + + /* NCSI does not support APE events */ + if (tg3_flag(tp, APE_HAS_NCSI)) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); + if (apedata != APE_SEG_SIG_MAGIC) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); + if (!(apedata & APE_FW_STATUS_READY)) + return; + + /* Wait for up to 1 millisecond for APE to service previous event. 
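+ * Each pass takes the MEM lock, checks APE_EVENT_STATUS_EVENT_PENDING
+ * and posts the new event only once the previous one has been
+ * consumed; ten polls of 100 usec each bound the wait at about 1 ms.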
*/ + for (i = 0; i < 10; i++) { + if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS); + + if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) + tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, + event | APE_EVENT_STATUS_EVENT_PENDING); + + tg3_ape_unlock(tp, TG3_APE_LOCK_MEM); + + if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) + break; + + udelay(100); + } + + if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) + tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1); +} + +static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) +{ + u32 event; + u32 apedata; + + if (!tg3_flag(tp, ENABLE_APE)) + return; + + switch (kind) { + case RESET_KIND_INIT: + tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, + APE_HOST_SEG_SIG_MAGIC); + tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN, + APE_HOST_SEG_LEN_MAGIC); + apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT); + tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata); + tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID, + APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM)); + tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR, + APE_HOST_BEHAV_NO_PHYLOCK); + tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, + TG3_APE_HOST_DRVR_STATE_START); + + event = APE_EVENT_STATUS_STATE_START; + break; + case RESET_KIND_SHUTDOWN: + /* With the interface we are currently using, + * APE does not track driver state. Wiping + * out the HOST SEGMENT SIGNATURE forces + * the APE to assume OS absent status. + */ + tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0); + + if (device_may_wakeup(&tp->pdev->dev) && + tg3_flag(tp, WOL_ENABLE)) { + tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED, + TG3_APE_HOST_WOL_SPEED_AUTO); + apedata = TG3_APE_HOST_DRVR_STATE_WOL; + } else + apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD; + + tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata); + + event = APE_EVENT_STATUS_STATE_UNLOAD; + break; + case RESET_KIND_SUSPEND: + event = APE_EVENT_STATUS_STATE_SUSPEND; + break; + default: + return; + } + + event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE; + + tg3_ape_send_event(tp, event); +} + +static void tg3_disable_ints(struct tg3 *tp) +{ + int i; + + tw32(TG3PCI_MISC_HOST_CTRL, + (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT)); + for (i = 0; i < tp->irq_max; i++) + tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001); +} + +static void tg3_enable_ints(struct tg3 *tp) +{ + int i; + + tp->irq_sync = 0; + wmb(); + + tw32(TG3PCI_MISC_HOST_CTRL, + (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT)); + + tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE; + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); + if (tg3_flag(tp, 1SHOT_MSI)) + tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); + + tp->coal_now |= tnapi->coal_now; + } + + /* Force an initial interrupt */ + if (!tg3_flag(tp, TAGGED_STATUS) && + (tp->napi[0].hw_status->status & SD_STATUS_UPDATED)) + tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); + else + tw32(HOSTCC_MODE, tp->coal_now); + + tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now); +} + +static inline unsigned int tg3_has_work(struct tg3_napi *tnapi) +{ + struct tg3 *tp = tnapi->tp; + struct tg3_hw_status *sblk = tnapi->hw_status; + unsigned int work_exists = 0; + + /* check for phy events */ + if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) { + if (sblk->status & SD_STATUS_LINK_CHG) + work_exists = 1; + } + /* check for RX/TX work to do */ + if 
(sblk->idx[0].tx_consumer != tnapi->tx_cons || + *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) + work_exists = 1; + + return work_exists; +} + +/* tg3_int_reenable + * similar to tg3_enable_ints, but it accurately determines whether there + * is new work pending and can return without flushing the PIO write + * which reenables interrupts + */ +static void tg3_int_reenable(struct tg3_napi *tnapi) +{ + struct tg3 *tp = tnapi->tp; + + tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); + mmiowb(); + + /* When doing tagged status, this work check is unnecessary. + * The last_tag we write above tells the chip which piece of + * work we've completed. + */ + if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi)) + tw32(HOSTCC_MODE, tp->coalesce_mode | + HOSTCC_MODE_ENABLE | tnapi->coal_now); +} + +static void tg3_switch_clocks(struct tg3 *tp) +{ + u32 clock_ctrl; + u32 orig_clock_ctrl; + + if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS)) + return; + + clock_ctrl = tr32(TG3PCI_CLOCK_CTRL); + + orig_clock_ctrl = clock_ctrl; + clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN | + CLOCK_CTRL_CLKRUN_OENABLE | + 0x1f); + tp->pci_clock_ctrl = clock_ctrl; + + if (tg3_flag(tp, 5705_PLUS)) { + if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) { + tw32_wait_f(TG3PCI_CLOCK_CTRL, + clock_ctrl | CLOCK_CTRL_625_CORE, 40); + } + } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) { + tw32_wait_f(TG3PCI_CLOCK_CTRL, + clock_ctrl | + (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK), + 40); + tw32_wait_f(TG3PCI_CLOCK_CTRL, + clock_ctrl | (CLOCK_CTRL_ALTCLK), + 40); + } + tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40); +} + +#define PHY_BUSY_LOOPS 5000 + +static int tg3_readphy(struct tg3 *tp, int reg, u32 *val) +{ + u32 frame_val; + unsigned int loops; + int ret; + + if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { + tw32_f(MAC_MI_MODE, + (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); + udelay(80); + } + + *val = 0x0; + + frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) & + MI_COM_PHY_ADDR_MASK); + frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) & + MI_COM_REG_ADDR_MASK); + frame_val |= (MI_COM_CMD_READ | MI_COM_START); + + tw32_f(MAC_MI_COM, frame_val); + + loops = PHY_BUSY_LOOPS; + while (loops != 0) { + udelay(10); + frame_val = tr32(MAC_MI_COM); + + if ((frame_val & MI_COM_BUSY) == 0) { + udelay(5); + frame_val = tr32(MAC_MI_COM); + break; + } + loops -= 1; + } + + ret = -EBUSY; + if (loops != 0) { + *val = frame_val & MI_COM_DATA_MASK; + ret = 0; + } + + if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { + tw32_f(MAC_MI_MODE, tp->mi_mode); + udelay(80); + } + + return ret; +} + +static int tg3_writephy(struct tg3 *tp, int reg, u32 val) +{ + u32 frame_val; + unsigned int loops; + int ret; + + if ((tp->phy_flags & TG3_PHYFLG_IS_FET) && + (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL)) + return 0; + + if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { + tw32_f(MAC_MI_MODE, + (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); + udelay(80); + } + + frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) & + MI_COM_PHY_ADDR_MASK); + frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) & + MI_COM_REG_ADDR_MASK); + frame_val |= (val & MI_COM_DATA_MASK); + frame_val |= (MI_COM_CMD_WRITE | MI_COM_START); + + tw32_f(MAC_MI_COM, frame_val); + + loops = PHY_BUSY_LOOPS; + while (loops != 0) { + udelay(10); + frame_val = tr32(MAC_MI_COM); + if ((frame_val & MI_COM_BUSY) == 0) { + udelay(5); + frame_val = tr32(MAC_MI_COM); + break; + } + loops -= 1; + } + + ret = -EBUSY; + if (loops != 0) + ret = 0; + + if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) 
{ + tw32_f(MAC_MI_MODE, tp->mi_mode); + udelay(80); + } + + return ret; +} + +static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val) +{ + int err; + + err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_CTRL, + MII_TG3_MMD_CTRL_DATA_NOINC | devad); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val); + +done: + return err; +} + +static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val) +{ + int err; + + err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr); + if (err) + goto done; + + err = tg3_writephy(tp, MII_TG3_MMD_CTRL, + MII_TG3_MMD_CTRL_DATA_NOINC | devad); + if (err) + goto done; + + err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val); + +done: + return err; +} + +static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val) +{ + int err; + + err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg); + if (!err) + err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val); + + return err; +} + +static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val) +{ + int err; + + err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg); + if (!err) + err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val); + + return err; +} + +static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val) +{ + int err; + + err = tg3_writephy(tp, MII_TG3_AUX_CTRL, + (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) | + MII_TG3_AUXCTL_SHDWSEL_MISC); + if (!err) + err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val); + + return err; +} + +static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set) +{ + if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC) + set |= MII_TG3_AUXCTL_MISC_WREN; + + return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg); +} + +#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \ + tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \ + MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \ + MII_TG3_AUXCTL_ACTL_TX_6DB) + +#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \ + tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \ + MII_TG3_AUXCTL_ACTL_TX_6DB); + +static int tg3_bmcr_reset(struct tg3 *tp) +{ + u32 phy_control; + int limit, err; + + /* OK, reset it, and poll the BMCR_RESET bit until it + * clears or we time out. 
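+ * The PHY clears BMCR_RESET by itself once the reset completes, so
+ * up to 5000 polls at 10 usec apiece (roughly 50 ms) are allowed
+ * before giving up with -EBUSY.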
+ */ + phy_control = BMCR_RESET; + err = tg3_writephy(tp, MII_BMCR, phy_control); + if (err != 0) + return -EBUSY; + + limit = 5000; + while (limit--) { + err = tg3_readphy(tp, MII_BMCR, &phy_control); + if (err != 0) + return -EBUSY; + + if ((phy_control & BMCR_RESET) == 0) { + udelay(40); + break; + } + udelay(10); + } + if (limit < 0) + return -EBUSY; + + return 0; +} + +static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg) +{ + struct tg3 *tp = bp->priv; + u32 val; + + spin_lock_bh(&tp->lock); + + if (tg3_readphy(tp, reg, &val)) + val = -EIO; + + spin_unlock_bh(&tp->lock); + + return val; +} + +static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val) +{ + struct tg3 *tp = bp->priv; + u32 ret = 0; + + spin_lock_bh(&tp->lock); + + if (tg3_writephy(tp, reg, val)) + ret = -EIO; + + spin_unlock_bh(&tp->lock); + + return ret; +} + +static int tg3_mdio_reset(struct mii_bus *bp) +{ + return 0; +} + +static void tg3_mdio_config_5785(struct tg3 *tp) +{ + u32 val; + struct phy_device *phydev; + + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { + case PHY_ID_BCM50610: + case PHY_ID_BCM50610M: + val = MAC_PHYCFG2_50610_LED_MODES; + break; + case PHY_ID_BCMAC131: + val = MAC_PHYCFG2_AC131_LED_MODES; + break; + case PHY_ID_RTL8211C: + val = MAC_PHYCFG2_RTL8211C_LED_MODES; + break; + case PHY_ID_RTL8201E: + val = MAC_PHYCFG2_RTL8201E_LED_MODES; + break; + default: + return; + } + + if (phydev->interface != PHY_INTERFACE_MODE_RGMII) { + tw32(MAC_PHYCFG2, val); + + val = tr32(MAC_PHYCFG1); + val &= ~(MAC_PHYCFG1_RGMII_INT | + MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK); + val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT; + tw32(MAC_PHYCFG1, val); + + return; + } + + if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) + val |= MAC_PHYCFG2_EMODE_MASK_MASK | + MAC_PHYCFG2_FMODE_MASK_MASK | + MAC_PHYCFG2_GMODE_MASK_MASK | + MAC_PHYCFG2_ACT_MASK_MASK | + MAC_PHYCFG2_QUAL_MASK_MASK | + MAC_PHYCFG2_INBAND_ENABLE; + + tw32(MAC_PHYCFG2, val); + + val = tr32(MAC_PHYCFG1); + val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK | + MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN); + if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) { + if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN)) + val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC; + if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN)) + val |= MAC_PHYCFG1_RGMII_SND_STAT_EN; + } + val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT | + MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV; + tw32(MAC_PHYCFG1, val); + + val = tr32(MAC_EXT_RGMII_MODE); + val &= ~(MAC_RGMII_MODE_RX_INT_B | + MAC_RGMII_MODE_RX_QUALITY | + MAC_RGMII_MODE_RX_ACTIVITY | + MAC_RGMII_MODE_RX_ENG_DET | + MAC_RGMII_MODE_TX_ENABLE | + MAC_RGMII_MODE_TX_LOWPWR | + MAC_RGMII_MODE_TX_RESET); + if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) { + if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN)) + val |= MAC_RGMII_MODE_RX_INT_B | + MAC_RGMII_MODE_RX_QUALITY | + MAC_RGMII_MODE_RX_ACTIVITY | + MAC_RGMII_MODE_RX_ENG_DET; + if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN)) + val |= MAC_RGMII_MODE_TX_ENABLE | + MAC_RGMII_MODE_TX_LOWPWR | + MAC_RGMII_MODE_TX_RESET; + } + tw32(MAC_EXT_RGMII_MODE, val); +} + +static void tg3_mdio_start(struct tg3 *tp) +{ + tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL; + tw32_f(MAC_MI_MODE, tp->mi_mode); + udelay(80); + + if (tg3_flag(tp, MDIOBUS_INITED) && + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) + tg3_mdio_config_5785(tp); +} + +static int tg3_mdio_init(struct tg3 *tp) +{ + int i; + u32 reg; + struct 
phy_device *phydev; + + if (tg3_flag(tp, 5717_PLUS)) { + u32 is_serdes; + + tp->phy_addr = tp->pci_fn + 1; + + if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) + is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES; + else + is_serdes = tr32(TG3_CPMU_PHY_STRAP) & + TG3_CPMU_PHY_STRAP_IS_SERDES; + if (is_serdes) + tp->phy_addr += 7; + } else + tp->phy_addr = TG3_PHY_MII_ADDR; + + tg3_mdio_start(tp); + + if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED)) + return 0; + + tp->mdio_bus = mdiobus_alloc(); + if (tp->mdio_bus == NULL) + return -ENOMEM; + + tp->mdio_bus->name = "tg3 mdio bus"; + snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", + (tp->pdev->bus->number << 8) | tp->pdev->devfn); + tp->mdio_bus->priv = tp; + tp->mdio_bus->parent = &tp->pdev->dev; + tp->mdio_bus->read = &tg3_mdio_read; + tp->mdio_bus->write = &tg3_mdio_write; + tp->mdio_bus->reset = &tg3_mdio_reset; + tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR); + tp->mdio_bus->irq = &tp->mdio_irq[0]; + + for (i = 0; i < PHY_MAX_ADDR; i++) + tp->mdio_bus->irq[i] = PHY_POLL; + + /* The bus registration will look for all the PHYs on the mdio bus. + * Unfortunately, it does not ensure the PHY is powered up before + * accessing the PHY ID registers. A chip reset is the + * quickest way to bring the device back to an operational state. + */ + if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN)) + tg3_bmcr_reset(tp); + + i = mdiobus_register(tp->mdio_bus); + if (i) { + dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i); + mdiobus_free(tp->mdio_bus); + return i; + } + + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + + if (!phydev || !phydev->drv) { + dev_warn(&tp->pdev->dev, "No PHY devices\n"); + mdiobus_unregister(tp->mdio_bus); + mdiobus_free(tp->mdio_bus); + return -ENODEV; + } + + switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { + case PHY_ID_BCM57780: + phydev->interface = PHY_INTERFACE_MODE_GMII; + phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE; + break; + case PHY_ID_BCM50610: + case PHY_ID_BCM50610M: + phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE | + PHY_BRCM_RX_REFCLK_UNUSED | + PHY_BRCM_DIS_TXCRXC_NOENRGY | + PHY_BRCM_AUTO_PWRDWN_ENABLE; + if (tg3_flag(tp, RGMII_INBAND_DISABLE)) + phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE; + if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN)) + phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE; + if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN)) + phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE; + /* fallthru */ + case PHY_ID_RTL8211C: + phydev->interface = PHY_INTERFACE_MODE_RGMII; + break; + case PHY_ID_RTL8201E: + case PHY_ID_BCMAC131: + phydev->interface = PHY_INTERFACE_MODE_MII; + phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE; + tp->phy_flags |= TG3_PHYFLG_IS_FET; + break; + } + + tg3_flag_set(tp, MDIOBUS_INITED); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) + tg3_mdio_config_5785(tp); + + return 0; +} + +static void tg3_mdio_fini(struct tg3 *tp) +{ + if (tg3_flag(tp, MDIOBUS_INITED)) { + tg3_flag_clear(tp, MDIOBUS_INITED); + mdiobus_unregister(tp->mdio_bus); + mdiobus_free(tp->mdio_bus); + } +} + +/* tp->lock is held. */ +static inline void tg3_generate_fw_event(struct tg3 *tp) +{ + u32 val; + + val = tr32(GRC_RX_CPU_EVENT); + val |= GRC_RX_CPU_DRIVER_EVENT; + tw32_f(GRC_RX_CPU_EVENT, val); + + tp->last_event_jiffies = jiffies; +} + +#define TG3_FW_EVENT_TIMEOUT_USEC 2500 + +/* tp->lock is held.
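+ *
+ * The firmware event mailbox handshake: tg3_generate_fw_event() sets
+ * GRC_RX_CPU_DRIVER_EVENT and timestamps it, and the firmware clears
+ * the bit once it has serviced the event. This helper spins until the
+ * previous event has been acknowledged or the 2.5 ms window expires.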
*/ +static void tg3_wait_for_event_ack(struct tg3 *tp) +{ + int i; + unsigned int delay_cnt; + long time_remain; + + /* If enough time has passed, no wait is necessary. */ + time_remain = (long)(tp->last_event_jiffies + 1 + + usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) - + (long)jiffies; + if (time_remain < 0) + return; + + /* Check if we can shorten the wait time. */ + delay_cnt = jiffies_to_usecs(time_remain); + if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC) + delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC; + delay_cnt = (delay_cnt >> 3) + 1; + + for (i = 0; i < delay_cnt; i++) { + if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT)) + break; + udelay(8); + } +} + +/* tp->lock is held. */ +static void tg3_ump_link_report(struct tg3 *tp) +{ + u32 reg; + u32 val; + + if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF)) + return; + + tg3_wait_for_event_ack(tp); + + tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE); + + tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14); + + val = 0; + if (!tg3_readphy(tp, MII_BMCR, &reg)) + val = reg << 16; + if (!tg3_readphy(tp, MII_BMSR, &reg)) + val |= (reg & 0xffff); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val); + + val = 0; + if (!tg3_readphy(tp, MII_ADVERTISE, &reg)) + val = reg << 16; + if (!tg3_readphy(tp, MII_LPA, &reg)) + val |= (reg & 0xffff); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val); + + val = 0; + if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) { + if (!tg3_readphy(tp, MII_CTRL1000, &reg)) + val = reg << 16; + if (!tg3_readphy(tp, MII_STAT1000, &reg)) + val |= (reg & 0xffff); + } + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val); + + if (!tg3_readphy(tp, MII_PHYADDR, &reg)) + val = reg << 16; + else + val = 0; + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val); + + tg3_generate_fw_event(tp); +} + +/* tp->lock is held. */ +static void tg3_stop_fw(struct tg3 *tp) +{ + if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { + /* Wait for RX cpu to ACK the previous event. */ + tg3_wait_for_event_ack(tp); + + tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW); + + tg3_generate_fw_event(tp); + + /* Wait for RX cpu to ACK this event. */ + tg3_wait_for_event_ack(tp); + } +} + +/* tp->lock is held. */ +static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind) +{ + tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX, + NIC_SRAM_FIRMWARE_MBOX_MAGIC1); + + if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) { + switch (kind) { + case RESET_KIND_INIT: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_START); + break; + + case RESET_KIND_SHUTDOWN: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_UNLOAD); + break; + + case RESET_KIND_SUSPEND: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_SUSPEND); + break; + + default: + break; + } + } + + if (kind == RESET_KIND_INIT || + kind == RESET_KIND_SUSPEND) + tg3_ape_driver_state_change(tp, kind); +} + +/* tp->lock is held. */ +static void tg3_write_sig_post_reset(struct tg3 *tp, int kind) +{ + if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) { + switch (kind) { + case RESET_KIND_INIT: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_START_DONE); + break; + + case RESET_KIND_SHUTDOWN: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_UNLOAD_DONE); + break; + + default: + break; + } + } + + if (kind == RESET_KIND_SHUTDOWN) + tg3_ape_driver_state_change(tp, kind); +} + +/* tp->lock is held.
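+ *
+ * Unlike tg3_write_sig_pre_reset(), this variant keys only on
+ * ENABLE_ASF and posts the same DRV_STATE_* words, with no firmware
+ * mailbox signature and no APE state change.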
*/ +static void tg3_write_sig_legacy(struct tg3 *tp, int kind) +{ + if (tg3_flag(tp, ENABLE_ASF)) { + switch (kind) { + case RESET_KIND_INIT: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_START); + break; + + case RESET_KIND_SHUTDOWN: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_UNLOAD); + break; + + case RESET_KIND_SUSPEND: + tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, + DRV_STATE_SUSPEND); + break; + + default: + break; + } + } +} + +static int tg3_poll_fw(struct tg3 *tp) +{ + int i; + u32 val; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + /* Wait up to 20ms for init done. */ + for (i = 0; i < 200; i++) { + if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE) + return 0; + udelay(100); + } + return -ENODEV; + } + + /* Wait for firmware initialization to complete. */ + for (i = 0; i < 100000; i++) { + tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val); + if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) + break; + udelay(10); + } + + /* Chip might not be fitted with firmware. Some Sun onboard + * parts are configured like that. So don't signal the timeout + * of the above loop as an error, but do report the lack of + * running firmware once. + */ + if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) { + tg3_flag_set(tp, NO_FWARE_REPORTED); + + netdev_info(tp->dev, "No firmware running\n"); + } + + if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) { + /* The 57765 A0 needs a little more + * time to do some important work. + */ + mdelay(10); + } + + return 0; +} + +static void tg3_link_report(struct tg3 *tp) +{ + if (!netif_carrier_ok(tp->dev)) { + netif_info(tp, link, tp->dev, "Link is down\n"); + tg3_ump_link_report(tp); + } else if (netif_msg_link(tp)) { + netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n", + (tp->link_config.active_speed == SPEED_1000 ? + 1000 : + (tp->link_config.active_speed == SPEED_100 ? + 100 : 10)), + (tp->link_config.active_duplex == DUPLEX_FULL ? + "full" : "half")); + + netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n", + (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ? + "on" : "off", + (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ? + "on" : "off"); + + if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) + netdev_info(tp->dev, "EEE is %s\n", + tp->setlpicnt ? 
"enabled" : "disabled"); + + tg3_ump_link_report(tp); + } +} + +static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl) +{ + u16 miireg; + + if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) + miireg = ADVERTISE_PAUSE_CAP; + else if (flow_ctrl & FLOW_CTRL_TX) + miireg = ADVERTISE_PAUSE_ASYM; + else if (flow_ctrl & FLOW_CTRL_RX) + miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + else + miireg = 0; + + return miireg; +} + +static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) +{ + u16 miireg; + + if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX)) + miireg = ADVERTISE_1000XPAUSE; + else if (flow_ctrl & FLOW_CTRL_TX) + miireg = ADVERTISE_1000XPSE_ASYM; + else if (flow_ctrl & FLOW_CTRL_RX) + miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM; + else + miireg = 0; + + return miireg; +} + +static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv) +{ + u8 cap = 0; + + if (lcladv & ADVERTISE_1000XPAUSE) { + if (lcladv & ADVERTISE_1000XPSE_ASYM) { + if (rmtadv & LPA_1000XPAUSE) + cap = FLOW_CTRL_TX | FLOW_CTRL_RX; + else if (rmtadv & LPA_1000XPAUSE_ASYM) + cap = FLOW_CTRL_RX; + } else { + if (rmtadv & LPA_1000XPAUSE) + cap = FLOW_CTRL_TX | FLOW_CTRL_RX; + } + } else if (lcladv & ADVERTISE_1000XPSE_ASYM) { + if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM)) + cap = FLOW_CTRL_TX; + } + + return cap; +} + +static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv) +{ + u8 autoneg; + u8 flowctrl = 0; + u32 old_rx_mode = tp->rx_mode; + u32 old_tx_mode = tp->tx_mode; + + if (tg3_flag(tp, USE_PHYLIB)) + autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg; + else + autoneg = tp->link_config.autoneg; + + if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) { + if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) + flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv); + else + flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv); + } else + flowctrl = tp->link_config.flowctrl; + + tp->link_config.active_flowctrl = flowctrl; + + if (flowctrl & FLOW_CTRL_RX) + tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE; + else + tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE; + + if (old_rx_mode != tp->rx_mode) + tw32_f(MAC_RX_MODE, tp->rx_mode); + + if (flowctrl & FLOW_CTRL_TX) + tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE; + else + tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE; + + if (old_tx_mode != tp->tx_mode) + tw32_f(MAC_TX_MODE, tp->tx_mode); +} + +static void tg3_adjust_link(struct net_device *dev) +{ + u8 oldflowctrl, linkmesg = 0; + u32 mac_mode, lcl_adv, rmt_adv; + struct tg3 *tp = netdev_priv(dev); + struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + + spin_lock_bh(&tp->lock); + + mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK | + MAC_MODE_HALF_DUPLEX); + + oldflowctrl = tp->link_config.active_flowctrl; + + if (phydev->link) { + lcl_adv = 0; + rmt_adv = 0; + + if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10) + mac_mode |= MAC_MODE_PORT_MODE_MII; + else if (phydev->speed == SPEED_1000 || + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) + mac_mode |= MAC_MODE_PORT_MODE_GMII; + else + mac_mode |= MAC_MODE_PORT_MODE_MII; + + if (phydev->duplex == DUPLEX_HALF) + mac_mode |= MAC_MODE_HALF_DUPLEX; + else { + lcl_adv = tg3_advert_flowctrl_1000T( + tp->link_config.flowctrl); + + if (phydev->pause) + rmt_adv = LPA_PAUSE_CAP; + if (phydev->asym_pause) + rmt_adv |= LPA_PAUSE_ASYM; + } + + tg3_setup_flow_control(tp, lcl_adv, rmt_adv); + } else + mac_mode |= MAC_MODE_PORT_MODE_GMII; + + if (mac_mode != tp->mac_mode) { + tp->mac_mode = 
mac_mode; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { + if (phydev->speed == SPEED_10) + tw32(MAC_MI_STAT, + MAC_MI_STAT_10MBPS_MODE | + MAC_MI_STAT_LNKSTAT_ATTN_ENAB); + else + tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); + } + + if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF) + tw32(MAC_TX_LENGTHS, + ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | + (6 << TX_LENGTHS_IPG_SHIFT) | + (0xff << TX_LENGTHS_SLOT_TIME_SHIFT))); + else + tw32(MAC_TX_LENGTHS, + ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | + (6 << TX_LENGTHS_IPG_SHIFT) | + (32 << TX_LENGTHS_SLOT_TIME_SHIFT))); + + if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) || + (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) || + phydev->speed != tp->link_config.active_speed || + phydev->duplex != tp->link_config.active_duplex || + oldflowctrl != tp->link_config.active_flowctrl) + linkmesg = 1; + + tp->link_config.active_speed = phydev->speed; + tp->link_config.active_duplex = phydev->duplex; + + spin_unlock_bh(&tp->lock); + + if (linkmesg) + tg3_link_report(tp); +} + +static int tg3_phy_init(struct tg3 *tp) +{ + struct phy_device *phydev; + + if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) + return 0; + + /* Bring the PHY back to a known state. */ + tg3_bmcr_reset(tp); + + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + + /* Attach the MAC to the PHY. */ + phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link, + phydev->dev_flags, phydev->interface); + if (IS_ERR(phydev)) { + dev_err(&tp->pdev->dev, "Could not attach to PHY\n"); + return PTR_ERR(phydev); + } + + /* Mask with MAC supported features. */ + switch (phydev->interface) { + case PHY_INTERFACE_MODE_GMII: + case PHY_INTERFACE_MODE_RGMII: + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { + phydev->supported &= (PHY_GBIT_FEATURES | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + break; + } + /* fallthru */ + case PHY_INTERFACE_MODE_MII: + phydev->supported &= (PHY_BASIC_FEATURES | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + break; + default: + phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); + return -EINVAL; + } + + tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED; + + phydev->advertising = phydev->supported; + + return 0; +} + +static void tg3_phy_start(struct tg3 *tp) +{ + struct phy_device *phydev; + + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) + return; + + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { + tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; + phydev->speed = tp->link_config.orig_speed; + phydev->duplex = tp->link_config.orig_duplex; + phydev->autoneg = tp->link_config.orig_autoneg; + phydev->advertising = tp->link_config.orig_advertising; + } + + phy_start(phydev); + + phy_start_aneg(phydev); +} + +static void tg3_phy_stop(struct tg3 *tp) +{ + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) + return; + + phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); +} + +static void tg3_phy_fini(struct tg3 *tp) +{ + if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { + phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); + tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED; + } +} + +static int tg3_phy_set_extloopbk(struct tg3 *tp) +{ + int err; + u32 val; + + if (tp->phy_flags & TG3_PHYFLG_IS_FET) + return 0; + + if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { + /* Cannot do read-modify-write on 5401 */ + err = tg3_phy_auxctl_write(tp, + MII_TG3_AUXCTL_SHDWSEL_AUXCTL, + MII_TG3_AUXCTL_ACTL_EXTLOOPBK 
| + 0x4c20); + goto done; + } + + err = tg3_phy_auxctl_read(tp, + MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); + if (err) + return err; + + val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK; + err = tg3_phy_auxctl_write(tp, + MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val); + +done: + return err; +} + +static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable) +{ + u32 phytest; + + if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { + u32 phy; + + tg3_writephy(tp, MII_TG3_FET_TEST, + phytest | MII_TG3_FET_SHADOW_EN); + if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) { + if (enable) + phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD; + else + phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD; + tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy); + } + tg3_writephy(tp, MII_TG3_FET_TEST, phytest); + } +} + +static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable) +{ + u32 reg; + + if (!tg3_flag(tp, 5705_PLUS) || + (tg3_flag(tp, 5717_PLUS) && + (tp->phy_flags & TG3_PHYFLG_MII_SERDES))) + return; + + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { + tg3_phy_fet_toggle_apd(tp, enable); + return; + } + + reg = MII_TG3_MISC_SHDW_WREN | + MII_TG3_MISC_SHDW_SCR5_SEL | + MII_TG3_MISC_SHDW_SCR5_LPED | + MII_TG3_MISC_SHDW_SCR5_DLPTLM | + MII_TG3_MISC_SHDW_SCR5_SDTL | + MII_TG3_MISC_SHDW_SCR5_C125OE; + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable) + reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD; + + tg3_writephy(tp, MII_TG3_MISC_SHDW, reg); + + + reg = MII_TG3_MISC_SHDW_WREN | + MII_TG3_MISC_SHDW_APD_SEL | + MII_TG3_MISC_SHDW_APD_WKTM_84MS; + if (enable) + reg |= MII_TG3_MISC_SHDW_APD_ENABLE; + + tg3_writephy(tp, MII_TG3_MISC_SHDW, reg); +} + +static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable) +{ + u32 phy; + + if (!tg3_flag(tp, 5705_PLUS) || + (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) + return; + + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { + u32 ephy; + + if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) { + u32 reg = MII_TG3_FET_SHDW_MISCCTRL; + + tg3_writephy(tp, MII_TG3_FET_TEST, + ephy | MII_TG3_FET_SHADOW_EN); + if (!tg3_readphy(tp, reg, &phy)) { + if (enable) + phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX; + else + phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX; + tg3_writephy(tp, reg, phy); + } + tg3_writephy(tp, MII_TG3_FET_TEST, ephy); + } + } else { + int ret; + + ret = tg3_phy_auxctl_read(tp, + MII_TG3_AUXCTL_SHDWSEL_MISC, &phy); + if (!ret) { + if (enable) + phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX; + else + phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX; + tg3_phy_auxctl_write(tp, + MII_TG3_AUXCTL_SHDWSEL_MISC, phy); + } + } +} + +static void tg3_phy_set_wirespeed(struct tg3 *tp) +{ + int ret; + u32 val; + + if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) + return; + + ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val); + if (!ret) + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, + val | MII_TG3_AUXCTL_MISC_WIRESPD_EN); +} + +static void tg3_phy_apply_otp(struct tg3 *tp) +{ + u32 otp, phy; + + if (!tp->phy_otp) + return; + + otp = tp->phy_otp; + + if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) + return; + + phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT); + phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT; + tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy); + + phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) | + ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT); + tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy); + + phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT); + phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ; + tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy); + + phy = ((otp & TG3_OTP_VDAC_MASK) >> 
TG3_OTP_VDAC_SHIFT); + tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy); + + phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT); + tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy); + + phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) | + ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT); + tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy); + + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); +} + +static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up) +{ + u32 val; + + if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) + return; + + tp->setlpicnt = 0; + + if (tp->link_config.autoneg == AUTONEG_ENABLE && + current_link_up == 1 && + tp->link_config.active_duplex == DUPLEX_FULL && + (tp->link_config.active_speed == SPEED_100 || + tp->link_config.active_speed == SPEED_1000)) { + u32 eeectl; + + if (tp->link_config.active_speed == SPEED_1000) + eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US; + else + eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US; + + tw32(TG3_CPMU_EEE_CTRL, eeectl); + + tg3_phy_cl45_read(tp, MDIO_MMD_AN, + TG3_CL45_D7_EEERES_STAT, &val); + + if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || + val == TG3_CL45_D7_EEERES_STAT_LP_100TX) + tp->setlpicnt = 2; + } + + if (!tp->setlpicnt) { + if (current_link_up == 1 && + !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000); + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + } + + val = tr32(TG3_CPMU_EEE_MODE); + tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE); + } +} + +static void tg3_phy_eee_enable(struct tg3 *tp) +{ + u32 val; + + if (tp->link_config.active_speed == SPEED_1000 && + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) && + !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + val = MII_TG3_DSP_TAP26_ALNOKO | + MII_TG3_DSP_TAP26_RMRXSTO; + tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + } + + val = tr32(TG3_CPMU_EEE_MODE); + tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE); +} + +static int tg3_wait_macro_done(struct tg3 *tp) +{ + int limit = 100; + + while (limit--) { + u32 tmp32; + + if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) { + if ((tmp32 & 0x1000) == 0) + break; + } + } + if (limit < 0) + return -EBUSY; + + return 0; +} + +static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp) +{ + static const u32 test_pat[4][6] = { + { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 }, + { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 }, + { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 }, + { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 } + }; + int chan; + + for (chan = 0; chan < 4; chan++) { + int i; + + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + (chan * 0x2000) | 0x0200); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002); + + for (i = 0; i < 6; i++) + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, + test_pat[chan][i]); + + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202); + if (tg3_wait_macro_done(tp)) { + *resetp = 1; + return -EBUSY; + } + + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + (chan * 0x2000) | 0x0200); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082); + if (tg3_wait_macro_done(tp)) { + *resetp = 1; + return -EBUSY; + } + + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802); + if (tg3_wait_macro_done(tp)) { + *resetp = 1; + return -EBUSY; + } + + for (i = 0; i < 6; i += 2) { + u32 low, high; + + if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) || + tg3_readphy(tp, 
MII_TG3_DSP_RW_PORT, &high) || + tg3_wait_macro_done(tp)) { + *resetp = 1; + return -EBUSY; + } + low &= 0x7fff; + high &= 0x000f; + if (low != test_pat[chan][i] || + high != test_pat[chan][i+1]) { + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b); + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001); + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005); + + return -EBUSY; + } + } + } + + return 0; +} + +static int tg3_phy_reset_chanpat(struct tg3 *tp) +{ + int chan; + + for (chan = 0; chan < 4; chan++) { + int i; + + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + (chan * 0x2000) | 0x0200); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002); + for (i = 0; i < 6; i++) + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202); + if (tg3_wait_macro_done(tp)) + return -EBUSY; + } + + return 0; +} + +static int tg3_phy_reset_5703_4_5(struct tg3 *tp) +{ + u32 reg32, phy9_orig; + int retries, do_phy_reset, err; + + retries = 10; + do_phy_reset = 1; + do { + if (do_phy_reset) { + err = tg3_bmcr_reset(tp); + if (err) + return err; + do_phy_reset = 0; + } + + /* Disable transmitter and interrupt. */ + if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) + continue; + + reg32 |= 0x3000; + tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); + + /* Set full-duplex, 1000 mbps. */ + tg3_writephy(tp, MII_BMCR, + BMCR_FULLDPLX | BMCR_SPEED1000); + + /* Set to master mode. */ + if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig)) + continue; + + tg3_writephy(tp, MII_CTRL1000, + CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER); + + err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp); + if (err) + return err; + + /* Block the PHY control access. */ + tg3_phydsp_write(tp, 0x8005, 0x0800); + + err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset); + if (!err) + break; + } while (--retries); + + err = tg3_phy_reset_chanpat(tp); + if (err) + return err; + + tg3_phydsp_write(tp, 0x8005, 0x0000); + + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200); + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000); + + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + + tg3_writephy(tp, MII_CTRL1000, phy9_orig); + + if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) { + reg32 &= ~0x3000; + tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); + } else if (!err) + err = -EBUSY; + + return err; +} + +/* Reset the tigon3 PHY and reapply chip-specific setup and workarounds. + * Any link that was up is reported as down before the reset begins.
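+ * MII_BMSR is read twice at entry because the BMSR link-status bit
+ * latches low; the first read clears any stale link-down indication.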
+ */ +static int tg3_phy_reset(struct tg3 *tp) +{ + u32 val, cpmuctrl; + int err; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + val = tr32(GRC_MISC_CFG); + tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ); + udelay(40); + } + err = tg3_readphy(tp, MII_BMSR, &val); + err |= tg3_readphy(tp, MII_BMSR, &val); + if (err != 0) + return -EBUSY; + + if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) { + netif_carrier_off(tp->dev); + tg3_link_report(tp); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + err = tg3_phy_reset_5703_4_5(tp); + if (err) + return err; + goto out; + } + + cpmuctrl = 0; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && + GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) { + cpmuctrl = tr32(TG3_CPMU_CTRL); + if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) + tw32(TG3_CPMU_CTRL, + cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY); + } + + err = tg3_bmcr_reset(tp); + if (err) + return err; + + if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) { + val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz; + tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val); + + tw32(TG3_CPMU_CTRL, cpmuctrl); + } + + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX || + GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) { + val = tr32(TG3_CPMU_LSPD_1000MB_CLK); + if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) == + CPMU_LSPD_1000MB_MACCLK_12_5) { + val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; + udelay(40); + tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); + } + } + + if (tg3_flag(tp, 5717_PLUS) && + (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) + return 0; + + tg3_phy_apply_otp(tp); + + if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) + tg3_phy_toggle_apd(tp, true); + else + tg3_phy_toggle_apd(tp, false); + +out: + if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) && + !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + tg3_phydsp_write(tp, 0x201f, 0x2aaa); + tg3_phydsp_write(tp, 0x000a, 0x0323); + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + } + + if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) { + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); + } + + if (tp->phy_flags & TG3_PHYFLG_BER_BUG) { + if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + tg3_phydsp_write(tp, 0x000a, 0x310b); + tg3_phydsp_write(tp, 0x201f, 0x9506); + tg3_phydsp_write(tp, 0x401f, 0x14e2); + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + } + } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) { + if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); + if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) { + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b); + tg3_writephy(tp, MII_TG3_TEST1, + MII_TG3_TEST1_TRIM_EN | 0x4); + } else + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); + + TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + } + } + + /* Set Extended packet length bit (bit 14) on all chips that */ + /* support jumbo frames */ + if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { + /* Cannot do read-modify-write on 5401 */ + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); + } else if (tg3_flag(tp, JUMBO_CAPABLE)) { + /* Set bit 14 with read-modify-write to preserve other bits */ + err = tg3_phy_auxctl_read(tp, + MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); + if (!err) + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, + val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN); + } + + /* Set phy register 0x10 bit 0 to high fifo elasticity to support + * jumbo frames transmission. 
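+ * Register 0x10 is MII_TG3_EXT_CTRL; the read-modify-write below only
+ * ORs in MII_TG3_EXT_CTRL_FIFO_ELASTIC, leaving the other bits intact.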
+ */ + if (tg3_flag(tp, JUMBO_CAPABLE)) { + if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val)) + tg3_writephy(tp, MII_TG3_EXT_CTRL, + val | MII_TG3_EXT_CTRL_FIFO_ELASTIC); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + /* adjust output voltage */ + tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12); + } + + tg3_phy_toggle_automdix(tp, 1); + tg3_phy_set_wirespeed(tp); + return 0; +} + +#define TG3_GPIO_MSG_DRVR_PRES 0x00000001 +#define TG3_GPIO_MSG_NEED_VAUX 0x00000002 +#define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \ + TG3_GPIO_MSG_NEED_VAUX) +#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \ + ((TG3_GPIO_MSG_DRVR_PRES << 0) | \ + (TG3_GPIO_MSG_DRVR_PRES << 4) | \ + (TG3_GPIO_MSG_DRVR_PRES << 8) | \ + (TG3_GPIO_MSG_DRVR_PRES << 12)) + +#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \ + ((TG3_GPIO_MSG_NEED_VAUX << 0) | \ + (TG3_GPIO_MSG_NEED_VAUX << 4) | \ + (TG3_GPIO_MSG_NEED_VAUX << 8) | \ + (TG3_GPIO_MSG_NEED_VAUX << 12)) + +static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat) +{ + u32 status, shift; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG); + else + status = tr32(TG3_CPMU_DRV_STATUS); + + shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn; + status &= ~(TG3_GPIO_MSG_MASK << shift); + status |= (newstat << shift); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status); + else + tw32(TG3_CPMU_DRV_STATUS, status); + + return status >> TG3_APE_GPIO_MSG_SHIFT; +} + +static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp) +{ + if (!tg3_flag(tp, IS_NIC)) + return 0; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) + return -EIO; + + tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES); + + tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); + } else { + tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + } + + return 0; +} + +static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp) +{ + u32 grc_local_ctrl; + + if (!tg3_flag(tp, IS_NIC) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) + return; + + grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1; + + tw32_wait_f(GRC_LOCAL_CTRL, + grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + tw32_wait_f(GRC_LOCAL_CTRL, + grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + tw32_wait_f(GRC_LOCAL_CTRL, + grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1, + TG3_GRC_LCLCTL_PWRSW_DELAY); +} + +static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp) +{ + if (!tg3_flag(tp, IS_NIC)) + return; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | + (GRC_LCLCTRL_GPIO_OE0 | + GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OE2 | + GRC_LCLCTRL_GPIO_OUTPUT0 | + GRC_LCLCTRL_GPIO_OUTPUT1), + TG3_GRC_LCLCTL_PWRSW_DELAY); + } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { + /* The 5761 non-e device swaps GPIO 0 and GPIO 2. 
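+ * Consequently the sequence below raises OUTPUT2 and then drops
+ * OUTPUT0, the mirror image of the ordering used on other chips.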
*/ + u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 | + GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OE2 | + GRC_LCLCTRL_GPIO_OUTPUT0 | + GRC_LCLCTRL_GPIO_OUTPUT1 | + tp->grc_local_ctrl; + tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2; + tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0; + tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + } else { + u32 no_gpio2; + u32 grc_local_ctrl = 0; + + /* Workaround to prevent overdrawing Amps. */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { + grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; + tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | + grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + } + + /* On 5753 and variants, GPIO2 cannot be used. */ + no_gpio2 = tp->nic_sram_data_cfg & + NIC_SRAM_DATA_CFG_NO_GPIO2; + + grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | + GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OE2 | + GRC_LCLCTRL_GPIO_OUTPUT1 | + GRC_LCLCTRL_GPIO_OUTPUT2; + if (no_gpio2) { + grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 | + GRC_LCLCTRL_GPIO_OUTPUT2); + } + tw32_wait_f(GRC_LOCAL_CTRL, + tp->grc_local_ctrl | grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0; + + tw32_wait_f(GRC_LOCAL_CTRL, + tp->grc_local_ctrl | grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + + if (!no_gpio2) { + grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2; + tw32_wait_f(GRC_LOCAL_CTRL, + tp->grc_local_ctrl | grc_local_ctrl, + TG3_GRC_LCLCTL_PWRSW_DELAY); + } + } +} + +static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable) +{ + u32 msg = 0; + + /* Serialize power state transitions */ + if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO)) + return; + + if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable) + msg = TG3_GPIO_MSG_NEED_VAUX; + + msg = tg3_set_function_status(tp, msg); + + if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK) + goto done; + + if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK) + tg3_pwrsrc_switch_to_vaux(tp); + else + tg3_pwrsrc_die_with_vmain(tp); + +done: + tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO); +} + +static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol) +{ + bool need_vaux = false; + + /* The GPIOs do something completely different on 57765. */ + if (!tg3_flag(tp, IS_NIC) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + return; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + tg3_frob_aux_power_5717(tp, include_wol ? + tg3_flag(tp, WOL_ENABLE) != 0 : 0); + return; + } + + if (tp->pdev_peer && tp->pdev_peer != tp->pdev) { + struct net_device *dev_peer; + + dev_peer = pci_get_drvdata(tp->pdev_peer); + + /* remove_one() may have been run on the peer. 
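+ * In that case pci_get_drvdata() returns NULL and the peer simply
+ * stops voting on the power source; only this device's own WOL/ASF
+ * flags are considered below.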
*/ + if (dev_peer) { + struct tg3 *tp_peer = netdev_priv(dev_peer); + + if (tg3_flag(tp_peer, INIT_COMPLETE)) + return; + + if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) || + tg3_flag(tp_peer, ENABLE_ASF)) + need_vaux = true; + } + } + + if ((include_wol && tg3_flag(tp, WOL_ENABLE)) || + tg3_flag(tp, ENABLE_ASF)) + need_vaux = true; + + if (need_vaux) + tg3_pwrsrc_switch_to_vaux(tp); + else + tg3_pwrsrc_die_with_vmain(tp); +} + +static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed) +{ + if (tp->led_ctrl == LED_CTRL_MODE_PHY_2) + return 1; + else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) { + if (speed != SPEED_10) + return 1; + } else if (speed == SPEED_10) + return 1; + + return 0; +} + +static int tg3_setup_phy(struct tg3 *, int); +static int tg3_halt_cpu(struct tg3 *, u32); + +static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) +{ + u32 val; + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { + u32 sg_dig_ctrl = tr32(SG_DIG_CTRL); + u32 serdes_cfg = tr32(MAC_SERDES_CFG); + + sg_dig_ctrl |= + SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET; + tw32(SG_DIG_CTRL, sg_dig_ctrl); + tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15)); + } + return; + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + tg3_bmcr_reset(tp); + val = tr32(GRC_MISC_CFG); + tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); + udelay(40); + return; + } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) { + u32 phytest; + if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { + u32 phy; + + tg3_writephy(tp, MII_ADVERTISE, 0); + tg3_writephy(tp, MII_BMCR, + BMCR_ANENABLE | BMCR_ANRESTART); + + tg3_writephy(tp, MII_TG3_FET_TEST, + phytest | MII_TG3_FET_SHADOW_EN); + if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) { + phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD; + tg3_writephy(tp, + MII_TG3_FET_SHDW_AUXMODE4, + phy); + } + tg3_writephy(tp, MII_TG3_FET_TEST, phytest); + } + return; + } else if (do_low_power) { + tg3_writephy(tp, MII_TG3_EXT_CTRL, + MII_TG3_EXT_CTRL_FORCE_LED_OFF); + + val = MII_TG3_AUXCTL_PCTL_100TX_LPWR | + MII_TG3_AUXCTL_PCTL_SPR_ISOLATE | + MII_TG3_AUXCTL_PCTL_VREG_11V; + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val); + } + + /* The PHY should not be powered down on some chips because + * of bugs. + */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 && + (tp->phy_flags & TG3_PHYFLG_MII_SERDES))) + return; + + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX || + GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) { + val = tr32(TG3_CPMU_LSPD_1000MB_CLK); + val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; + val |= CPMU_LSPD_1000MB_MACCLK_12_5; + tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); + } + + tg3_writephy(tp, MII_BMCR, BMCR_PDOWN); +} + +/* tp->lock is held. */ +static int tg3_nvram_lock(struct tg3 *tp) +{ + if (tg3_flag(tp, NVRAM)) { + int i; + + if (tp->nvram_lock_cnt == 0) { + tw32(NVRAM_SWARB, SWARB_REQ_SET1); + for (i = 0; i < 8000; i++) { + if (tr32(NVRAM_SWARB) & SWARB_GNT1) + break; + udelay(20); + } + if (i == 8000) { + tw32(NVRAM_SWARB, SWARB_REQ_CLR1); + return -ENODEV; + } + } + tp->nvram_lock_cnt++; + } + return 0; +} + +/* tp->lock is held. 
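+ *
+ * Pairs with tg3_nvram_lock(): the nesting count is decremented and
+ * the hardware arbiter is released (SWARB_REQ_CLR1) only when the
+ * count drops back to zero.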
*/ +static void tg3_nvram_unlock(struct tg3 *tp) +{ + if (tg3_flag(tp, NVRAM)) { + if (tp->nvram_lock_cnt > 0) + tp->nvram_lock_cnt--; + if (tp->nvram_lock_cnt == 0) + tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1); + } +} + +/* tp->lock is held. */ +static void tg3_enable_nvram_access(struct tg3 *tp) +{ + if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { + u32 nvaccess = tr32(NVRAM_ACCESS); + + tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); + } +} + +/* tp->lock is held. */ +static void tg3_disable_nvram_access(struct tg3 *tp) +{ + if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) { + u32 nvaccess = tr32(NVRAM_ACCESS); + + tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); + } +} + +static int tg3_nvram_read_using_eeprom(struct tg3 *tp, + u32 offset, u32 *val) +{ + u32 tmp; + int i; + + if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0) + return -EINVAL; + + tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK | + EEPROM_ADDR_DEVID_MASK | + EEPROM_ADDR_READ); + tw32(GRC_EEPROM_ADDR, + tmp | + (0 << EEPROM_ADDR_DEVID_SHIFT) | + ((offset << EEPROM_ADDR_ADDR_SHIFT) & + EEPROM_ADDR_ADDR_MASK) | + EEPROM_ADDR_READ | EEPROM_ADDR_START); + + for (i = 0; i < 1000; i++) { + tmp = tr32(GRC_EEPROM_ADDR); + + if (tmp & EEPROM_ADDR_COMPLETE) + break; + msleep(1); + } + if (!(tmp & EEPROM_ADDR_COMPLETE)) + return -EBUSY; + + tmp = tr32(GRC_EEPROM_DATA); + + /* + * The data will always be opposite the native endian + * format. Perform a blind byteswap to compensate. + */ + *val = swab32(tmp); + + return 0; +} + +#define NVRAM_CMD_TIMEOUT 10000 + +static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) +{ + int i; + + tw32(NVRAM_CMD, nvram_cmd); + for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) { + udelay(10); + if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) { + udelay(10); + break; + } + } + + if (i == NVRAM_CMD_TIMEOUT) + return -EBUSY; + + return 0; +} + +static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) +{ + if (tg3_flag(tp, NVRAM) && + tg3_flag(tp, NVRAM_BUFFERED) && + tg3_flag(tp, FLASH) && + !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && + (tp->nvram_jedecnum == JEDEC_ATMEL)) + + addr = ((addr / tp->nvram_pagesize) << + ATMEL_AT45DB0X1B_PAGE_POS) + + (addr % tp->nvram_pagesize); + + return addr; +} + +static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr) +{ + if (tg3_flag(tp, NVRAM) && + tg3_flag(tp, NVRAM_BUFFERED) && + tg3_flag(tp, FLASH) && + !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) && + (tp->nvram_jedecnum == JEDEC_ATMEL)) + + addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) * + tp->nvram_pagesize) + + (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1)); + + return addr; +} + +/* NOTE: Data read in from NVRAM is byteswapped according to + * the byteswapping settings for all other register accesses. + * tg3 devices are BE devices, so on a BE machine, the data + * returned will be exactly as it is seen in NVRAM. On a LE + * machine, the 32-bit value will be byteswapped. 
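+ * + * For example (illustrative values only): a word seen as + * 0x12345678 in NVRAM reads back as 0x12345678 on a big-endian + * host but as 0x78563412 on a little-endian one. Callers that + * need a stable byte-stream order should use + * tg3_nvram_read_be32() below instead.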
+ */ +static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) +{ + int ret; + + if (!tg3_flag(tp, NVRAM)) + return tg3_nvram_read_using_eeprom(tp, offset, val); + + offset = tg3_nvram_phys_addr(tp, offset); + + if (offset > NVRAM_ADDR_MSK) + return -EINVAL; + + ret = tg3_nvram_lock(tp); + if (ret) + return ret; + + tg3_enable_nvram_access(tp); + + tw32(NVRAM_ADDR, offset); + ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO | + NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE); + + if (ret == 0) + *val = tr32(NVRAM_RDDATA); + + tg3_disable_nvram_access(tp); + + tg3_nvram_unlock(tp); + + return ret; +} + +/* Ensures NVRAM data is in bytestream format. */ +static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val) +{ + u32 v; + int res = tg3_nvram_read(tp, offset, &v); + if (!res) + *val = cpu_to_be32(v); + return res; +} + +#define RX_CPU_SCRATCH_BASE 0x30000 +#define RX_CPU_SCRATCH_SIZE 0x04000 +#define TX_CPU_SCRATCH_BASE 0x34000 +#define TX_CPU_SCRATCH_SIZE 0x04000 + +/* tp->lock is held. */ +static int tg3_halt_cpu(struct tg3 *tp, u32 offset) +{ + int i; + + BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + u32 val = tr32(GRC_VCPU_EXT_CTRL); + + tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU); + return 0; + } + if (offset == RX_CPU_BASE) { + for (i = 0; i < 10000; i++) { + tw32(offset + CPU_STATE, 0xffffffff); + tw32(offset + CPU_MODE, CPU_MODE_HALT); + if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) + break; + } + + tw32(offset + CPU_STATE, 0xffffffff); + tw32_f(offset + CPU_MODE, CPU_MODE_HALT); + udelay(10); + } else { + for (i = 0; i < 10000; i++) { + tw32(offset + CPU_STATE, 0xffffffff); + tw32(offset + CPU_MODE, CPU_MODE_HALT); + if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) + break; + } + } + + if (i >= 10000) { + netdev_err(tp->dev, "%s timed out, %s CPU\n", + __func__, offset == RX_CPU_BASE ? "RX" : "TX"); + return -ENODEV; + } + + /* Clear firmware's nvram arbitration. */ + if (tg3_flag(tp, NVRAM)) + tw32(NVRAM_SWARB, SWARB_REQ_CLR0); + return 0; +} + +struct fw_info { + unsigned int fw_base; + unsigned int fw_len; + const __be32 *fw_data; +}; + +/* tp->lock is held. */ +static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, + u32 cpu_scratch_base, int cpu_scratch_size, + struct fw_info *info) +{ + int err, lock_err, i; + void (*write_op)(struct tg3 *, u32, u32); + + if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) { + netdev_err(tp->dev, + "%s: Trying to load TX cpu firmware which is 5705\n", + __func__); + return -EINVAL; + } + + if (tg3_flag(tp, 5705_PLUS)) + write_op = tg3_write_mem; + else + write_op = tg3_write_indirect_reg32; + + /* It is possible that bootcode is still loading at this point. + * Get the nvram lock first before halting the cpu. + */ + lock_err = tg3_nvram_lock(tp); + err = tg3_halt_cpu(tp, cpu_base); + if (!lock_err) + tg3_nvram_unlock(tp); + if (err) + goto out; + + for (i = 0; i < cpu_scratch_size; i += sizeof(u32)) + write_op(tp, cpu_scratch_base + i, 0); + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT); + for (i = 0; i < (info->fw_len / sizeof(u32)); i++) + write_op(tp, (cpu_scratch_base + + (info->fw_base & 0xffff) + + (i * sizeof(u32))), + be32_to_cpu(info->fw_data[i])); + + err = 0; + +out: + return err; +} + +/* tp->lock is held. 
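+ * Loads the 5701 A0 firmware fix into both the RX and TX CPU + * scratch areas, then starts only the RX CPU at the firmware's + * entry point.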
*/ +static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) +{ + struct fw_info info; + const __be32 *fw_data; + int err, i; + + fw_data = (void *)tp->fw->data; + + /* Firmware blob starts with version numbers, followed by + start address and length. We are setting complete length. + length = end_address_of_bss - start_address_of_text. + Remainder is the blob to be loaded contiguously + from start address. */ + + info.fw_base = be32_to_cpu(fw_data[1]); + info.fw_len = tp->fw->size - 12; + info.fw_data = &fw_data[3]; + + err = tg3_load_firmware_cpu(tp, RX_CPU_BASE, + RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE, + &info); + if (err) + return err; + + err = tg3_load_firmware_cpu(tp, TX_CPU_BASE, + TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE, + &info); + if (err) + return err; + + /* Now startup only the RX cpu. */ + tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); + tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); + + for (i = 0; i < 5; i++) { + if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base) + break; + tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); + tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT); + tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); + udelay(1000); + } + if (i >= 5) { + netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x " + "should be %08x\n", __func__, + tr32(RX_CPU_BASE + CPU_PC), info.fw_base); + return -ENODEV; + } + tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); + tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000); + + return 0; +} + +/* tp->lock is held. */ +static int tg3_load_tso_firmware(struct tg3 *tp) +{ + struct fw_info info; + const __be32 *fw_data; + unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; + int err, i; + + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) + return 0; + + fw_data = (void *)tp->fw->data; + + /* Firmware blob starts with version numbers, followed by + start address and length. We are setting complete length. + length = end_address_of_bss - start_address_of_text. + Remainder is the blob to be loaded contiguously + from start address. */ + + info.fw_base = be32_to_cpu(fw_data[1]); + cpu_scratch_size = tp->fw_len; + info.fw_len = tp->fw->size - 12; + info.fw_data = &fw_data[3]; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + cpu_base = RX_CPU_BASE; + cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705; + } else { + cpu_base = TX_CPU_BASE; + cpu_scratch_base = TX_CPU_SCRATCH_BASE; + cpu_scratch_size = TX_CPU_SCRATCH_SIZE; + } + + err = tg3_load_firmware_cpu(tp, cpu_base, + cpu_scratch_base, cpu_scratch_size, + &info); + if (err) + return err; + + /* Now startup the cpu. */ + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32_f(cpu_base + CPU_PC, info.fw_base); + + for (i = 0; i < 5; i++) { + if (tr32(cpu_base + CPU_PC) == info.fw_base) + break; + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); + tw32_f(cpu_base + CPU_PC, info.fw_base); + udelay(1000); + } + if (i >= 5) { + netdev_err(tp->dev, + "%s fails to set CPU PC, is %08x should be %08x\n", + __func__, tr32(cpu_base + CPU_PC), info.fw_base); + return -ENODEV; + } + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32_f(cpu_base + CPU_MODE, 0x00000000); + return 0; +} + + +/* tp->lock is held. 
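+ * Programs the station address into the four MAC_ADDR_0..3 + * register pairs (optionally skipping pair 1), mirrors it into + * the extended address slots on 5703/5704, and seeds the TX + * backoff algorithm with the byte sum of the address.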
*/ +static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1) +{ + u32 addr_high, addr_low; + int i; + + addr_high = ((tp->dev->dev_addr[0] << 8) | + tp->dev->dev_addr[1]); + addr_low = ((tp->dev->dev_addr[2] << 24) | + (tp->dev->dev_addr[3] << 16) | + (tp->dev->dev_addr[4] << 8) | + (tp->dev->dev_addr[5] << 0)); + for (i = 0; i < 4; i++) { + if (i == 1 && skip_mac_1) + continue; + tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high); + tw32(MAC_ADDR_0_LOW + (i * 8), addr_low); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { + for (i = 0; i < 12; i++) { + tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high); + tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low); + } + } + + addr_high = (tp->dev->dev_addr[0] + + tp->dev->dev_addr[1] + + tp->dev->dev_addr[2] + + tp->dev->dev_addr[3] + + tp->dev->dev_addr[4] + + tp->dev->dev_addr[5]) & + TX_BACKOFF_SEED_MASK; + tw32(MAC_TX_BACKOFF_SEED, addr_high); +} + +static void tg3_enable_register_access(struct tg3 *tp) +{ + /* + * Make sure register accesses (indirect or otherwise) will function + * correctly. + */ + pci_write_config_dword(tp->pdev, + TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl); +} + +static int tg3_power_up(struct tg3 *tp) +{ + int err; + + tg3_enable_register_access(tp); + + err = pci_set_power_state(tp->pdev, PCI_D0); + if (!err) { + /* Switch out of Vaux if it is a NIC */ + tg3_pwrsrc_switch_to_vmain(tp); + } else { + netdev_err(tp->dev, "Transition to D0 failed\n"); + } + + return err; +} + +static int tg3_power_down_prepare(struct tg3 *tp) +{ + u32 misc_host_ctrl; + bool device_should_wake, do_low_power; + + tg3_enable_register_access(tp); + + /* Restore the CLKREQ setting. */ + if (tg3_flag(tp, CLKREQ_BUG)) { + u16 lnkctl; + + pci_read_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, + &lnkctl); + lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN; + pci_write_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, + lnkctl); + } + + misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); + tw32(TG3PCI_MISC_HOST_CTRL, + misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); + + device_should_wake = device_may_wakeup(&tp->pdev->dev) && + tg3_flag(tp, WOL_ENABLE); + + if (tg3_flag(tp, USE_PHYLIB)) { + do_low_power = false; + if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) && + !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { + struct phy_device *phydev; + u32 phyid, advertising; + + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + + tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; + + tp->link_config.orig_speed = phydev->speed; + tp->link_config.orig_duplex = phydev->duplex; + tp->link_config.orig_autoneg = phydev->autoneg; + tp->link_config.orig_advertising = phydev->advertising; + + advertising = ADVERTISED_TP | + ADVERTISED_Pause | + ADVERTISED_Autoneg | + ADVERTISED_10baseT_Half; + + if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) { + if (tg3_flag(tp, WOL_SPEED_100MB)) + advertising |= + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Full; + else + advertising |= ADVERTISED_10baseT_Full; + } + + phydev->advertising = advertising; + + phy_start_aneg(phydev); + + phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask; + if (phyid != PHY_ID_BCMAC131) { + phyid &= PHY_BCM_OUI_MASK; + if (phyid == PHY_BCM_OUI_1 || + phyid == PHY_BCM_OUI_2 || + phyid == PHY_BCM_OUI_3) + do_low_power = true; + } + } + } else { + do_low_power = true; + + if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { + tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; + tp->link_config.orig_speed = 
tp->link_config.speed; + tp->link_config.orig_duplex = tp->link_config.duplex; + tp->link_config.orig_autoneg = tp->link_config.autoneg; + } + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { + tp->link_config.speed = SPEED_10; + tp->link_config.duplex = DUPLEX_HALF; + tp->link_config.autoneg = AUTONEG_ENABLE; + tg3_setup_phy(tp, 0); + } + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + u32 val; + + val = tr32(GRC_VCPU_EXT_CTRL); + tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL); + } else if (!tg3_flag(tp, ENABLE_ASF)) { + int i; + u32 val; + + for (i = 0; i < 200; i++) { + tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val); + if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) + break; + msleep(1); + } + } + if (tg3_flag(tp, WOL_CAP)) + tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE | + WOL_DRV_STATE_SHUTDOWN | + WOL_DRV_WOL | + WOL_SET_MAGIC_PKT); + + if (device_should_wake) { + u32 mac_mode; + + if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { + if (do_low_power && + !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { + tg3_phy_auxctl_write(tp, + MII_TG3_AUXCTL_SHDWSEL_PWRCTL, + MII_TG3_AUXCTL_PCTL_WOL_EN | + MII_TG3_AUXCTL_PCTL_100TX_LPWR | + MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC); + udelay(40); + } + + if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) + mac_mode = MAC_MODE_PORT_MODE_GMII; + else + mac_mode = MAC_MODE_PORT_MODE_MII; + + mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == + ASIC_REV_5700) { + u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ? + SPEED_100 : SPEED_10; + if (tg3_5700_link_polarity(tp, speed)) + mac_mode |= MAC_MODE_LINK_POLARITY; + else + mac_mode &= ~MAC_MODE_LINK_POLARITY; + } + } else { + mac_mode = MAC_MODE_PORT_MODE_TBI; + } + + if (!tg3_flag(tp, 5750_PLUS)) + tw32(MAC_LED_CTRL, tp->led_ctrl); + + mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; + if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) && + (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE))) + mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL; + + if (tg3_flag(tp, ENABLE_APE)) + mac_mode |= MAC_MODE_APE_TX_EN | + MAC_MODE_APE_RX_EN | + MAC_MODE_TDE_ENABLE; + + tw32_f(MAC_MODE, mac_mode); + udelay(100); + + tw32_f(MAC_RX_MODE, RX_MODE_ENABLE); + udelay(10); + } + + if (!tg3_flag(tp, WOL_SPEED_100MB) && + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { + u32 base_val; + + base_val = tp->pci_clock_ctrl; + base_val |= (CLOCK_CTRL_RXCLK_DISABLE | + CLOCK_CTRL_TXCLK_DISABLE); + + tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK | + CLOCK_CTRL_PWRDOWN_PLL133, 40); + } else if (tg3_flag(tp, 5780_CLASS) || + tg3_flag(tp, CPMU_PRESENT) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + /* do nothing */ + } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) { + u32 newbits1, newbits2; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + newbits1 = (CLOCK_CTRL_RXCLK_DISABLE | + CLOCK_CTRL_TXCLK_DISABLE | + CLOCK_CTRL_ALTCLK); + newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; + } else if (tg3_flag(tp, 5705_PLUS)) { + newbits1 = CLOCK_CTRL_625_CORE; + newbits2 = newbits1 | CLOCK_CTRL_ALTCLK; + } else { + newbits1 = CLOCK_CTRL_ALTCLK; + newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; + } + + tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1, + 40); + + tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2, + 40); + + if (!tg3_flag(tp, 5705_PLUS)) { + u32 newbits3; + + if 
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + newbits3 = (CLOCK_CTRL_RXCLK_DISABLE | + CLOCK_CTRL_TXCLK_DISABLE | + CLOCK_CTRL_44MHZ_CORE); + } else { + newbits3 = CLOCK_CTRL_44MHZ_CORE; + } + + tw32_wait_f(TG3PCI_CLOCK_CTRL, + tp->pci_clock_ctrl | newbits3, 40); + } + } + + if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF)) + tg3_power_down_phy(tp, do_low_power); + + tg3_frob_aux_power(tp, true); + + /* Workaround for unstable PLL clock */ + if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) || + (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) { + u32 val = tr32(0x7d00); + + val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); + tw32(0x7d00, val); + if (!tg3_flag(tp, ENABLE_ASF)) { + int err; + + err = tg3_nvram_lock(tp); + tg3_halt_cpu(tp, RX_CPU_BASE); + if (!err) + tg3_nvram_unlock(tp); + } + } + + tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); + + return 0; +} + +static void tg3_power_down(struct tg3 *tp) +{ + tg3_power_down_prepare(tp); + + pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE)); + pci_set_power_state(tp->pdev, PCI_D3hot); +} + +static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex) +{ + switch (val & MII_TG3_AUX_STAT_SPDMASK) { + case MII_TG3_AUX_STAT_10HALF: + *speed = SPEED_10; + *duplex = DUPLEX_HALF; + break; + + case MII_TG3_AUX_STAT_10FULL: + *speed = SPEED_10; + *duplex = DUPLEX_FULL; + break; + + case MII_TG3_AUX_STAT_100HALF: + *speed = SPEED_100; + *duplex = DUPLEX_HALF; + break; + + case MII_TG3_AUX_STAT_100FULL: + *speed = SPEED_100; + *duplex = DUPLEX_FULL; + break; + + case MII_TG3_AUX_STAT_1000HALF: + *speed = SPEED_1000; + *duplex = DUPLEX_HALF; + break; + + case MII_TG3_AUX_STAT_1000FULL: + *speed = SPEED_1000; + *duplex = DUPLEX_FULL; + break; + + default: + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { + *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 : + SPEED_10; + *duplex = (val & MII_TG3_AUX_STAT_FULL) ? 
DUPLEX_FULL : + DUPLEX_HALF; + break; + } + *speed = SPEED_INVALID; + *duplex = DUPLEX_INVALID; + break; + } +} + +static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) +{ + int err = 0; + u32 val, new_adv; + + new_adv = ADVERTISE_CSMA; + if (advertise & ADVERTISED_10baseT_Half) + new_adv |= ADVERTISE_10HALF; + if (advertise & ADVERTISED_10baseT_Full) + new_adv |= ADVERTISE_10FULL; + if (advertise & ADVERTISED_100baseT_Half) + new_adv |= ADVERTISE_100HALF; + if (advertise & ADVERTISED_100baseT_Full) + new_adv |= ADVERTISE_100FULL; + + new_adv |= tg3_advert_flowctrl_1000T(flowctrl); + + err = tg3_writephy(tp, MII_ADVERTISE, new_adv); + if (err) + goto done; + + if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) + goto done; + + new_adv = 0; + if (advertise & ADVERTISED_1000baseT_Half) + new_adv |= ADVERTISE_1000HALF; + if (advertise & ADVERTISED_1000baseT_Full) + new_adv |= ADVERTISE_1000FULL; + + if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) + new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; + + err = tg3_writephy(tp, MII_CTRL1000, new_adv); + if (err) + goto done; + + if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) + goto done; + + tw32(TG3_CPMU_EEE_MODE, + tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE); + + err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp); + if (!err) { + u32 err2; + + val = 0; + /* Advertise 100-BaseTX EEE ability */ + if (advertise & ADVERTISED_100baseT_Full) + val |= MDIO_AN_EEE_ADV_100TX; + /* Advertise 1000-BaseT EEE ability */ + if (advertise & ADVERTISED_1000baseT_Full) + val |= MDIO_AN_EEE_ADV_1000T; + err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); + if (err) + val = 0; + + switch (GET_ASIC_REV(tp->pci_chip_rev_id)) { + case ASIC_REV_5717: + case ASIC_REV_57765: + case ASIC_REV_5719: + /* If we advertised any eee advertisements above... */ + if (val) + val = MII_TG3_DSP_TAP26_ALNOKO | + MII_TG3_DSP_TAP26_RMRXSTO | + MII_TG3_DSP_TAP26_OPCSINPT; + tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); + /* Fall through */ + case ASIC_REV_5720: + if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) + tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val | + MII_TG3_DSP_CH34TP2_HIBW01); + } + + err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + if (!err) + err = err2; + } + +done: + return err; +} + +static void tg3_phy_copper_begin(struct tg3 *tp) +{ + u32 new_adv; + int i; + + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { + new_adv = ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full; + if (tg3_flag(tp, WOL_SPEED_100MB)) + new_adv |= ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full; + + tg3_phy_autoneg_cfg(tp, new_adv, + FLOW_CTRL_TX | FLOW_CTRL_RX); + } else if (tp->link_config.speed == SPEED_INVALID) { + if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) + tp->link_config.advertising &= + ~(ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full); + + tg3_phy_autoneg_cfg(tp, tp->link_config.advertising, + tp->link_config.flowctrl); + } else { + /* Asking for a specific link mode. 
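+ * Advertise only the one speed/duplex that was requested; if + * autoneg is disabled, BMCR is additionally forced below.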
*/ + if (tp->link_config.speed == SPEED_1000) { + if (tp->link_config.duplex == DUPLEX_FULL) + new_adv = ADVERTISED_1000baseT_Full; + else + new_adv = ADVERTISED_1000baseT_Half; + } else if (tp->link_config.speed == SPEED_100) { + if (tp->link_config.duplex == DUPLEX_FULL) + new_adv = ADVERTISED_100baseT_Full; + else + new_adv = ADVERTISED_100baseT_Half; + } else { + if (tp->link_config.duplex == DUPLEX_FULL) + new_adv = ADVERTISED_10baseT_Full; + else + new_adv = ADVERTISED_10baseT_Half; + } + + tg3_phy_autoneg_cfg(tp, new_adv, + tp->link_config.flowctrl); + } + + if (tp->link_config.autoneg == AUTONEG_DISABLE && + tp->link_config.speed != SPEED_INVALID) { + u32 bmcr, orig_bmcr; + + tp->link_config.active_speed = tp->link_config.speed; + tp->link_config.active_duplex = tp->link_config.duplex; + + bmcr = 0; + switch (tp->link_config.speed) { + default: + case SPEED_10: + break; + + case SPEED_100: + bmcr |= BMCR_SPEED100; + break; + + case SPEED_1000: + bmcr |= BMCR_SPEED1000; + break; + } + + if (tp->link_config.duplex == DUPLEX_FULL) + bmcr |= BMCR_FULLDPLX; + + if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) && + (bmcr != orig_bmcr)) { + tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK); + for (i = 0; i < 1500; i++) { + u32 tmp; + + udelay(10); + if (tg3_readphy(tp, MII_BMSR, &tmp) || + tg3_readphy(tp, MII_BMSR, &tmp)) + continue; + if (!(tmp & BMSR_LSTATUS)) { + udelay(40); + break; + } + } + tg3_writephy(tp, MII_BMCR, bmcr); + udelay(40); + } + } else { + tg3_writephy(tp, MII_BMCR, + BMCR_ANENABLE | BMCR_ANRESTART); + } +} + +static int tg3_init_5401phy_dsp(struct tg3 *tp) +{ + int err; + + /* Turn off tap power management. */ + /* Set Extended packet length bit */ + err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); + + err |= tg3_phydsp_write(tp, 0x0012, 0x1804); + err |= tg3_phydsp_write(tp, 0x0013, 0x1204); + err |= tg3_phydsp_write(tp, 0x8006, 0x0132); + err |= tg3_phydsp_write(tp, 0x8006, 0x0232); + err |= tg3_phydsp_write(tp, 0x201f, 0x0a20); + + udelay(40); + + return err; +} + +static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask) +{ + u32 adv_reg, all_mask = 0; + + if (mask & ADVERTISED_10baseT_Half) + all_mask |= ADVERTISE_10HALF; + if (mask & ADVERTISED_10baseT_Full) + all_mask |= ADVERTISE_10FULL; + if (mask & ADVERTISED_100baseT_Half) + all_mask |= ADVERTISE_100HALF; + if (mask & ADVERTISED_100baseT_Full) + all_mask |= ADVERTISE_100FULL; + + if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg)) + return 0; + + if ((adv_reg & ADVERTISE_ALL) != all_mask) + return 0; + + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { + u32 tg3_ctrl; + + all_mask = 0; + if (mask & ADVERTISED_1000baseT_Half) + all_mask |= ADVERTISE_1000HALF; + if (mask & ADVERTISED_1000baseT_Full) + all_mask |= ADVERTISE_1000FULL; + + if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl)) + return 0; + + tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL); + if (tg3_ctrl != all_mask) + return 0; + } + + return 1; +} + +static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv) +{ + u32 curadv, reqadv; + + if (tg3_readphy(tp, MII_ADVERTISE, lcladv)) + return 1; + + curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); + reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl); + + if (tp->link_config.active_duplex == DUPLEX_FULL) { + if (curadv != reqadv) + return 0; + + if (tg3_flag(tp, PAUSE_AUTONEG)) + tg3_readphy(tp, MII_LPA, rmtadv); + } else { + /* Reprogram the advertisement register, even if it + * does not affect the current link. 
If the link + * gets renegotiated in the future, we can save an + * additional renegotiation cycle by advertising + * it correctly in the first place. + */ + if (curadv != reqadv) { + *lcladv &= ~(ADVERTISE_PAUSE_CAP | + ADVERTISE_PAUSE_ASYM); + tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv); + } + } + + return 1; +} + +static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) +{ + int current_link_up; + u32 bmsr, val; + u32 lcl_adv, rmt_adv; + u16 current_speed; + u8 current_duplex; + int i, err; + + tw32(MAC_EVENT, 0); + + tw32_f(MAC_STATUS, + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED | + MAC_STATUS_MI_COMPLETION | + MAC_STATUS_LNKSTATE_CHANGED)); + udelay(40); + + if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { + tw32_f(MAC_MI_MODE, + (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); + udelay(80); + } + + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0); + + /* Some third-party PHYs need to be reset on link going + * down. + */ + if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) && + netif_carrier_ok(tp->dev)) { + tg3_readphy(tp, MII_BMSR, &bmsr); + if (!tg3_readphy(tp, MII_BMSR, &bmsr) && + !(bmsr & BMSR_LSTATUS)) + force_reset = 1; + } + if (force_reset) + tg3_phy_reset(tp); + + if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { + tg3_readphy(tp, MII_BMSR, &bmsr); + if (tg3_readphy(tp, MII_BMSR, &bmsr) || + !tg3_flag(tp, INIT_COMPLETE)) + bmsr = 0; + + if (!(bmsr & BMSR_LSTATUS)) { + err = tg3_init_5401phy_dsp(tp); + if (err) + return err; + + tg3_readphy(tp, MII_BMSR, &bmsr); + for (i = 0; i < 1000; i++) { + udelay(10); + if (!tg3_readphy(tp, MII_BMSR, &bmsr) && + (bmsr & BMSR_LSTATUS)) { + udelay(40); + break; + } + } + + if ((tp->phy_id & TG3_PHY_ID_REV_MASK) == + TG3_PHY_REV_BCM5401_B0 && + !(bmsr & BMSR_LSTATUS) && + tp->link_config.active_speed == SPEED_1000) { + err = tg3_phy_reset(tp); + if (!err) + err = tg3_init_5401phy_dsp(tp); + if (err) + return err; + } + } + } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) { + /* 5701 {A0,B0} CRC bug workaround */ + tg3_writephy(tp, 0x15, 0x0a75); + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); + } + + /* Clear pending interrupts... 
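+ * (MII_TG3_ISTAT appears to be clear-on-read, hence the + * back-to-back reads below).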
*/ + tg3_readphy(tp, MII_TG3_ISTAT, &val); + tg3_readphy(tp, MII_TG3_ISTAT, &val); + + if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) + tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG); + else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) + tg3_writephy(tp, MII_TG3_IMASK, ~0); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + if (tp->led_ctrl == LED_CTRL_MODE_PHY_1) + tg3_writephy(tp, MII_TG3_EXT_CTRL, + MII_TG3_EXT_CTRL_LNK3_LED_MODE); + else + tg3_writephy(tp, MII_TG3_EXT_CTRL, 0); + } + + current_link_up = 0; + current_speed = SPEED_INVALID; + current_duplex = DUPLEX_INVALID; + + if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) { + err = tg3_phy_auxctl_read(tp, + MII_TG3_AUXCTL_SHDWSEL_MISCTEST, + &val); + if (!err && !(val & (1 << 10))) { + tg3_phy_auxctl_write(tp, + MII_TG3_AUXCTL_SHDWSEL_MISCTEST, + val | (1 << 10)); + goto relink; + } + } + + bmsr = 0; + for (i = 0; i < 100; i++) { + tg3_readphy(tp, MII_BMSR, &bmsr); + if (!tg3_readphy(tp, MII_BMSR, &bmsr) && + (bmsr & BMSR_LSTATUS)) + break; + udelay(40); + } + + if (bmsr & BMSR_LSTATUS) { + u32 aux_stat, bmcr; + + tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat); + for (i = 0; i < 2000; i++) { + udelay(10); + if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) && + aux_stat) + break; + } + + tg3_aux_stat_to_speed_duplex(tp, aux_stat, + &current_speed, + &current_duplex); + + bmcr = 0; + for (i = 0; i < 200; i++) { + tg3_readphy(tp, MII_BMCR, &bmcr); + if (tg3_readphy(tp, MII_BMCR, &bmcr)) + continue; + if (bmcr && bmcr != 0x7fff) + break; + udelay(10); + } + + lcl_adv = 0; + rmt_adv = 0; + + tp->link_config.active_speed = current_speed; + tp->link_config.active_duplex = current_duplex; + + if (tp->link_config.autoneg == AUTONEG_ENABLE) { + if ((bmcr & BMCR_ANENABLE) && + tg3_copper_is_advertising_all(tp, + tp->link_config.advertising)) { + if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv, + &rmt_adv)) + current_link_up = 1; + } + } else { + if (!(bmcr & BMCR_ANENABLE) && + tp->link_config.speed == current_speed && + tp->link_config.duplex == current_duplex && + tp->link_config.flowctrl == + tp->link_config.active_flowctrl) { + current_link_up = 1; + } + } + + if (current_link_up == 1 && + tp->link_config.active_duplex == DUPLEX_FULL) + tg3_setup_flow_control(tp, lcl_adv, rmt_adv); + } + +relink: + if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { + tg3_phy_copper_begin(tp); + + tg3_readphy(tp, MII_BMSR, &bmsr); + if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) || + (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) + current_link_up = 1; + } + + tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; + if (current_link_up == 1) { + if (tp->link_config.active_speed == SPEED_100 || + tp->link_config.active_speed == SPEED_10) + tp->mac_mode |= MAC_MODE_PORT_MODE_MII; + else + tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; + } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) + tp->mac_mode |= MAC_MODE_PORT_MODE_MII; + else + tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; + + tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; + if (tp->link_config.active_duplex == DUPLEX_HALF) + tp->mac_mode |= MAC_MODE_HALF_DUPLEX; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { + if (current_link_up == 1 && + tg3_5700_link_polarity(tp, tp->link_config.active_speed)) + tp->mac_mode |= MAC_MODE_LINK_POLARITY; + else + tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; + } + + /* ??? Without this setting Netgear GA302T PHY does not + * ??? send/receive packets...
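+ * ??? Enabling MAC_MI_MODE_AUTO_POLL below is an empirical + * ??? workaround for that PHY.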
+ */ + if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 && + tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) { + tp->mi_mode |= MAC_MI_MODE_AUTO_POLL; + tw32_f(MAC_MI_MODE, tp->mi_mode); + udelay(80); + } + + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + tg3_phy_eee_adjust(tp, current_link_up); + + if (tg3_flag(tp, USE_LINKCHG_REG)) { + /* Polled via timer. */ + tw32_f(MAC_EVENT, 0); + } else { + tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); + } + udelay(40); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 && + current_link_up == 1 && + tp->link_config.active_speed == SPEED_1000 && + (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) { + udelay(120); + tw32_f(MAC_STATUS, + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED)); + udelay(40); + tg3_write_mem(tp, + NIC_SRAM_FIRMWARE_MBOX, + NIC_SRAM_FIRMWARE_MBOX_MAGIC2); + } + + /* Prevent send BD corruption. */ + if (tg3_flag(tp, CLKREQ_BUG)) { + u16 oldlnkctl, newlnkctl; + + pci_read_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, + &oldlnkctl); + if (tp->link_config.active_speed == SPEED_100 || + tp->link_config.active_speed == SPEED_10) + newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN; + else + newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN; + if (newlnkctl != oldlnkctl) + pci_write_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + + PCI_EXP_LNKCTL, newlnkctl); + } + + if (current_link_up != netif_carrier_ok(tp->dev)) { + if (current_link_up) + netif_carrier_on(tp->dev); + else + netif_carrier_off(tp->dev); + tg3_link_report(tp); + } + + return 0; +} + +struct tg3_fiber_aneginfo { + int state; +#define ANEG_STATE_UNKNOWN 0 +#define ANEG_STATE_AN_ENABLE 1 +#define ANEG_STATE_RESTART_INIT 2 +#define ANEG_STATE_RESTART 3 +#define ANEG_STATE_DISABLE_LINK_OK 4 +#define ANEG_STATE_ABILITY_DETECT_INIT 5 +#define ANEG_STATE_ABILITY_DETECT 6 +#define ANEG_STATE_ACK_DETECT_INIT 7 +#define ANEG_STATE_ACK_DETECT 8 +#define ANEG_STATE_COMPLETE_ACK_INIT 9 +#define ANEG_STATE_COMPLETE_ACK 10 +#define ANEG_STATE_IDLE_DETECT_INIT 11 +#define ANEG_STATE_IDLE_DETECT 12 +#define ANEG_STATE_LINK_OK 13 +#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14 +#define ANEG_STATE_NEXT_PAGE_WAIT 15 + + u32 flags; +#define MR_AN_ENABLE 0x00000001 +#define MR_RESTART_AN 0x00000002 +#define MR_AN_COMPLETE 0x00000004 +#define MR_PAGE_RX 0x00000008 +#define MR_NP_LOADED 0x00000010 +#define MR_TOGGLE_TX 0x00000020 +#define MR_LP_ADV_FULL_DUPLEX 0x00000040 +#define MR_LP_ADV_HALF_DUPLEX 0x00000080 +#define MR_LP_ADV_SYM_PAUSE 0x00000100 +#define MR_LP_ADV_ASYM_PAUSE 0x00000200 +#define MR_LP_ADV_REMOTE_FAULT1 0x00000400 +#define MR_LP_ADV_REMOTE_FAULT2 0x00000800 +#define MR_LP_ADV_NEXT_PAGE 0x00001000 +#define MR_TOGGLE_RX 0x00002000 +#define MR_NP_RX 0x00004000 + +#define MR_LINK_OK 0x80000000 + + unsigned long link_time, cur_time; + + u32 ability_match_cfg; + int ability_match_count; + + char ability_match, idle_match, ack_match; + + u32 txconfig, rxconfig; +#define ANEG_CFG_NP 0x00000080 +#define ANEG_CFG_ACK 0x00000040 +#define ANEG_CFG_RF2 0x00000020 +#define ANEG_CFG_RF1 0x00000010 +#define ANEG_CFG_PS2 0x00000001 +#define ANEG_CFG_PS1 0x00008000 +#define ANEG_CFG_HD 0x00004000 +#define ANEG_CFG_FD 0x00002000 +#define ANEG_CFG_INVAL 0x00001f06 + +}; +#define ANEG_OK 0 +#define ANEG_DONE 1 +#define ANEG_TIMER_ENAB 2 +#define ANEG_FAILED -1 + +#define ANEG_STATE_SETTLE_TIME 10000 + +static int tg3_fiber_aneg_smachine(struct tg3 *tp, + struct tg3_fiber_aneginfo *ap) +{ + u16 flowctrl; + unsigned long delta; + u32 rx_cfg_reg; + 
int ret; + + if (ap->state == ANEG_STATE_UNKNOWN) { + ap->rxconfig = 0; + ap->link_time = 0; + ap->cur_time = 0; + ap->ability_match_cfg = 0; + ap->ability_match_count = 0; + ap->ability_match = 0; + ap->idle_match = 0; + ap->ack_match = 0; + } + ap->cur_time++; + + if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) { + rx_cfg_reg = tr32(MAC_RX_AUTO_NEG); + + if (rx_cfg_reg != ap->ability_match_cfg) { + ap->ability_match_cfg = rx_cfg_reg; + ap->ability_match = 0; + ap->ability_match_count = 0; + } else { + if (++ap->ability_match_count > 1) { + ap->ability_match = 1; + ap->ability_match_cfg = rx_cfg_reg; + } + } + if (rx_cfg_reg & ANEG_CFG_ACK) + ap->ack_match = 1; + else + ap->ack_match = 0; + + ap->idle_match = 0; + } else { + ap->idle_match = 1; + ap->ability_match_cfg = 0; + ap->ability_match_count = 0; + ap->ability_match = 0; + ap->ack_match = 0; + + rx_cfg_reg = 0; + } + + ap->rxconfig = rx_cfg_reg; + ret = ANEG_OK; + + switch (ap->state) { + case ANEG_STATE_UNKNOWN: + if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN)) + ap->state = ANEG_STATE_AN_ENABLE; + + /* fallthru */ + case ANEG_STATE_AN_ENABLE: + ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX); + if (ap->flags & MR_AN_ENABLE) { + ap->link_time = 0; + ap->cur_time = 0; + ap->ability_match_cfg = 0; + ap->ability_match_count = 0; + ap->ability_match = 0; + ap->idle_match = 0; + ap->ack_match = 0; + + ap->state = ANEG_STATE_RESTART_INIT; + } else { + ap->state = ANEG_STATE_DISABLE_LINK_OK; + } + break; + + case ANEG_STATE_RESTART_INIT: + ap->link_time = ap->cur_time; + ap->flags &= ~(MR_NP_LOADED); + ap->txconfig = 0; + tw32(MAC_TX_AUTO_NEG, 0); + tp->mac_mode |= MAC_MODE_SEND_CONFIGS; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + ret = ANEG_TIMER_ENAB; + ap->state = ANEG_STATE_RESTART; + + /* fallthru */ + case ANEG_STATE_RESTART: + delta = ap->cur_time - ap->link_time; + if (delta > ANEG_STATE_SETTLE_TIME) + ap->state = ANEG_STATE_ABILITY_DETECT_INIT; + else + ret = ANEG_TIMER_ENAB; + break; + + case ANEG_STATE_DISABLE_LINK_OK: + ret = ANEG_DONE; + break; + + case ANEG_STATE_ABILITY_DETECT_INIT: + ap->flags &= ~(MR_TOGGLE_TX); + ap->txconfig = ANEG_CFG_FD; + flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); + if (flowctrl & ADVERTISE_1000XPAUSE) + ap->txconfig |= ANEG_CFG_PS1; + if (flowctrl & ADVERTISE_1000XPSE_ASYM) + ap->txconfig |= ANEG_CFG_PS2; + tw32(MAC_TX_AUTO_NEG, ap->txconfig); + tp->mac_mode |= MAC_MODE_SEND_CONFIGS; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + ap->state = ANEG_STATE_ABILITY_DETECT; + break; + + case ANEG_STATE_ABILITY_DETECT: + if (ap->ability_match != 0 && ap->rxconfig != 0) + ap->state = ANEG_STATE_ACK_DETECT_INIT; + break; + + case ANEG_STATE_ACK_DETECT_INIT: + ap->txconfig |= ANEG_CFG_ACK; + tw32(MAC_TX_AUTO_NEG, ap->txconfig); + tp->mac_mode |= MAC_MODE_SEND_CONFIGS; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + ap->state = ANEG_STATE_ACK_DETECT; + + /* fallthru */ + case ANEG_STATE_ACK_DETECT: + if (ap->ack_match != 0) { + if ((ap->rxconfig & ~ANEG_CFG_ACK) == + (ap->ability_match_cfg & ~ANEG_CFG_ACK)) { + ap->state = ANEG_STATE_COMPLETE_ACK_INIT; + } else { + ap->state = ANEG_STATE_AN_ENABLE; + } + } else if (ap->ability_match != 0 && + ap->rxconfig == 0) { + ap->state = ANEG_STATE_AN_ENABLE; + } + break; + + case ANEG_STATE_COMPLETE_ACK_INIT: + if (ap->rxconfig & ANEG_CFG_INVAL) { + ret = ANEG_FAILED; + break; + } + ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX | + MR_LP_ADV_HALF_DUPLEX | + MR_LP_ADV_SYM_PAUSE | + MR_LP_ADV_ASYM_PAUSE | + MR_LP_ADV_REMOTE_FAULT1 | + 
MR_LP_ADV_REMOTE_FAULT2 | + MR_LP_ADV_NEXT_PAGE | + MR_TOGGLE_RX | + MR_NP_RX); + if (ap->rxconfig & ANEG_CFG_FD) + ap->flags |= MR_LP_ADV_FULL_DUPLEX; + if (ap->rxconfig & ANEG_CFG_HD) + ap->flags |= MR_LP_ADV_HALF_DUPLEX; + if (ap->rxconfig & ANEG_CFG_PS1) + ap->flags |= MR_LP_ADV_SYM_PAUSE; + if (ap->rxconfig & ANEG_CFG_PS2) + ap->flags |= MR_LP_ADV_ASYM_PAUSE; + if (ap->rxconfig & ANEG_CFG_RF1) + ap->flags |= MR_LP_ADV_REMOTE_FAULT1; + if (ap->rxconfig & ANEG_CFG_RF2) + ap->flags |= MR_LP_ADV_REMOTE_FAULT2; + if (ap->rxconfig & ANEG_CFG_NP) + ap->flags |= MR_LP_ADV_NEXT_PAGE; + + ap->link_time = ap->cur_time; + + ap->flags ^= (MR_TOGGLE_TX); + if (ap->rxconfig & 0x0008) + ap->flags |= MR_TOGGLE_RX; + if (ap->rxconfig & ANEG_CFG_NP) + ap->flags |= MR_NP_RX; + ap->flags |= MR_PAGE_RX; + + ap->state = ANEG_STATE_COMPLETE_ACK; + ret = ANEG_TIMER_ENAB; + break; + + case ANEG_STATE_COMPLETE_ACK: + if (ap->ability_match != 0 && + ap->rxconfig == 0) { + ap->state = ANEG_STATE_AN_ENABLE; + break; + } + delta = ap->cur_time - ap->link_time; + if (delta > ANEG_STATE_SETTLE_TIME) { + if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) { + ap->state = ANEG_STATE_IDLE_DETECT_INIT; + } else { + if ((ap->txconfig & ANEG_CFG_NP) == 0 && + !(ap->flags & MR_NP_RX)) { + ap->state = ANEG_STATE_IDLE_DETECT_INIT; + } else { + ret = ANEG_FAILED; + } + } + } + break; + + case ANEG_STATE_IDLE_DETECT_INIT: + ap->link_time = ap->cur_time; + tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + ap->state = ANEG_STATE_IDLE_DETECT; + ret = ANEG_TIMER_ENAB; + break; + + case ANEG_STATE_IDLE_DETECT: + if (ap->ability_match != 0 && + ap->rxconfig == 0) { + ap->state = ANEG_STATE_AN_ENABLE; + break; + } + delta = ap->cur_time - ap->link_time; + if (delta > ANEG_STATE_SETTLE_TIME) { + /* XXX another gem from the Broadcom driver :( */ + ap->state = ANEG_STATE_LINK_OK; + } + break; + + case ANEG_STATE_LINK_OK: + ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK); + ret = ANEG_DONE; + break; + + case ANEG_STATE_NEXT_PAGE_WAIT_INIT: + /* ??? unimplemented */ + break; + + case ANEG_STATE_NEXT_PAGE_WAIT: + /* ??? unimplemented */ + break; + + default: + ret = ANEG_FAILED; + break; + } + + return ret; +} + +static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags) +{ + int res = 0; + struct tg3_fiber_aneginfo aninfo; + int status = ANEG_FAILED; + unsigned int tick; + u32 tmp; + + tw32_f(MAC_TX_AUTO_NEG, 0); + + tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; + tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII); + udelay(40); + + tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS); + udelay(40); + + memset(&aninfo, 0, sizeof(aninfo)); + aninfo.flags |= MR_AN_ENABLE; + aninfo.state = ANEG_STATE_UNKNOWN; + aninfo.cur_time = 0; + tick = 0; + while (++tick < 195000) { + status = tg3_fiber_aneg_smachine(tp, &aninfo); + if (status == ANEG_DONE || status == ANEG_FAILED) + break; + + udelay(1); + } + + tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + *txflags = aninfo.txconfig; + *rxflags = aninfo.flags; + + if (status == ANEG_DONE && + (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK | + MR_LP_ADV_FULL_DUPLEX))) + res = 1; + + return res; +} + +static void tg3_init_bcm8002(struct tg3 *tp) +{ + u32 mac_status = tr32(MAC_STATUS); + int i; + + /* Reset when initting first time or we have a link. */ + if (tg3_flag(tp, INIT_COMPLETE) && + !(mac_status & MAC_STATUS_PCS_SYNCED)) + return; + + /* Set PLL lock range. 
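+ * (this and the other 8002 writes below use raw, vendor-specific + * register values).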
*/ + tg3_writephy(tp, 0x16, 0x8007); + + /* SW reset */ + tg3_writephy(tp, MII_BMCR, BMCR_RESET); + + /* Wait for reset to complete. */ + /* XXX schedule_timeout() ... */ + for (i = 0; i < 500; i++) + udelay(10); + + /* Config mode; select PMA/Ch 1 regs. */ + tg3_writephy(tp, 0x10, 0x8411); + + /* Enable auto-lock and comdet, select txclk for tx. */ + tg3_writephy(tp, 0x11, 0x0a10); + + tg3_writephy(tp, 0x18, 0x00a0); + tg3_writephy(tp, 0x16, 0x41ff); + + /* Assert and deassert POR. */ + tg3_writephy(tp, 0x13, 0x0400); + udelay(40); + tg3_writephy(tp, 0x13, 0x0000); + + tg3_writephy(tp, 0x11, 0x0a50); + udelay(40); + tg3_writephy(tp, 0x11, 0x0a10); + + /* Wait for signal to stabilize */ + /* XXX schedule_timeout() ... */ + for (i = 0; i < 15000; i++) + udelay(10); + + /* Deselect the channel register so we can read the PHYID + * later. + */ + tg3_writephy(tp, 0x10, 0x8011); +} + +static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) +{ + u16 flowctrl; + u32 sg_dig_ctrl, sg_dig_status; + u32 serdes_cfg, expected_sg_dig_ctrl; + int workaround, port_a; + int current_link_up; + + serdes_cfg = 0; + expected_sg_dig_ctrl = 0; + workaround = 0; + port_a = 1; + current_link_up = 0; + + if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 && + tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) { + workaround = 1; + if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) + port_a = 0; + + /* preserve bits 0-11,13,14 for signal pre-emphasis */ + /* preserve bits 20-23 for voltage regulator */ + serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff; + } + + sg_dig_ctrl = tr32(SG_DIG_CTRL); + + if (tp->link_config.autoneg != AUTONEG_ENABLE) { + if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) { + if (workaround) { + u32 val = serdes_cfg; + + if (port_a) + val |= 0xc010000; + else + val |= 0x4010000; + tw32_f(MAC_SERDES_CFG, val); + } + + tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); + } + if (mac_status & MAC_STATUS_PCS_SYNCED) { + tg3_setup_flow_control(tp, 0, 0); + current_link_up = 1; + } + goto out; + } + + /* Want auto-negotiation. 
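+ * Build the SG_DIG_CTRL value we expect to see (HW autoneg plus + * the advertised pause bits); if the register currently differs, + * autoneg is (re)started below with a soft reset.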
*/ + expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP; + + flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); + if (flowctrl & ADVERTISE_1000XPAUSE) + expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP; + if (flowctrl & ADVERTISE_1000XPSE_ASYM) + expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE; + + if (sg_dig_ctrl != expected_sg_dig_ctrl) { + if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) && + tp->serdes_counter && + ((mac_status & (MAC_STATUS_PCS_SYNCED | + MAC_STATUS_RCVD_CFG)) == + MAC_STATUS_PCS_SYNCED)) { + tp->serdes_counter--; + current_link_up = 1; + goto out; + } +restart_autoneg: + if (workaround) + tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000); + tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET); + udelay(5); + tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl); + + tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + } else if (mac_status & (MAC_STATUS_PCS_SYNCED | + MAC_STATUS_SIGNAL_DET)) { + sg_dig_status = tr32(SG_DIG_STATUS); + mac_status = tr32(MAC_STATUS); + + if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) && + (mac_status & MAC_STATUS_PCS_SYNCED)) { + u32 local_adv = 0, remote_adv = 0; + + if (sg_dig_ctrl & SG_DIG_PAUSE_CAP) + local_adv |= ADVERTISE_1000XPAUSE; + if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE) + local_adv |= ADVERTISE_1000XPSE_ASYM; + + if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE) + remote_adv |= LPA_1000XPAUSE; + if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE) + remote_adv |= LPA_1000XPAUSE_ASYM; + + tg3_setup_flow_control(tp, local_adv, remote_adv); + current_link_up = 1; + tp->serdes_counter = 0; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) { + if (tp->serdes_counter) + tp->serdes_counter--; + else { + if (workaround) { + u32 val = serdes_cfg; + + if (port_a) + val |= 0xc010000; + else + val |= 0x4010000; + + tw32_f(MAC_SERDES_CFG, val); + } + + tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); + udelay(40); + + /* Link parallel detection - link is up */ + /* only if we have PCS_SYNC and not */ + /* receiving config code words */ + mac_status = tr32(MAC_STATUS); + if ((mac_status & MAC_STATUS_PCS_SYNCED) && + !(mac_status & MAC_STATUS_RCVD_CFG)) { + tg3_setup_flow_control(tp, 0, 0); + current_link_up = 1; + tp->phy_flags |= + TG3_PHYFLG_PARALLEL_DETECT; + tp->serdes_counter = + SERDES_PARALLEL_DET_TIMEOUT; + } else + goto restart_autoneg; + } + } + } else { + tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + } + +out: + return current_link_up; +} + +static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status) +{ + int current_link_up = 0; + + if (!(mac_status & MAC_STATUS_PCS_SYNCED)) + goto out; + + if (tp->link_config.autoneg == AUTONEG_ENABLE) { + u32 txflags, rxflags; + int i; + + if (fiber_autoneg(tp, &txflags, &rxflags)) { + u32 local_adv = 0, remote_adv = 0; + + if (txflags & ANEG_CFG_PS1) + local_adv |= ADVERTISE_1000XPAUSE; + if (txflags & ANEG_CFG_PS2) + local_adv |= ADVERTISE_1000XPSE_ASYM; + + if (rxflags & MR_LP_ADV_SYM_PAUSE) + remote_adv |= LPA_1000XPAUSE; + if (rxflags & MR_LP_ADV_ASYM_PAUSE) + remote_adv |= LPA_1000XPAUSE_ASYM; + + tg3_setup_flow_control(tp, local_adv, remote_adv); + + current_link_up = 1; + } + for (i = 0; i < 30; i++) { + udelay(20); + tw32_f(MAC_STATUS, + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED)); + udelay(40); + if ((tr32(MAC_STATUS) & + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED)) == 0) + break; + } + + mac_status = 
tr32(MAC_STATUS); + if (current_link_up == 0 && + (mac_status & MAC_STATUS_PCS_SYNCED) && + !(mac_status & MAC_STATUS_RCVD_CFG)) + current_link_up = 1; + } else { + tg3_setup_flow_control(tp, 0, 0); + + /* Forcing 1000FD link up. */ + current_link_up = 1; + + tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS)); + udelay(40); + + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + } + +out: + return current_link_up; +} + +static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset) +{ + u32 orig_pause_cfg; + u16 orig_active_speed; + u8 orig_active_duplex; + u32 mac_status; + int current_link_up; + int i; + + orig_pause_cfg = tp->link_config.active_flowctrl; + orig_active_speed = tp->link_config.active_speed; + orig_active_duplex = tp->link_config.active_duplex; + + if (!tg3_flag(tp, HW_AUTONEG) && + netif_carrier_ok(tp->dev) && + tg3_flag(tp, INIT_COMPLETE)) { + mac_status = tr32(MAC_STATUS); + mac_status &= (MAC_STATUS_PCS_SYNCED | + MAC_STATUS_SIGNAL_DET | + MAC_STATUS_CFG_CHANGED | + MAC_STATUS_RCVD_CFG); + if (mac_status == (MAC_STATUS_PCS_SYNCED | + MAC_STATUS_SIGNAL_DET)) { + tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED)); + return 0; + } + } + + tw32_f(MAC_TX_AUTO_NEG, 0); + + tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); + tp->mac_mode |= MAC_MODE_PORT_MODE_TBI; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + if (tp->phy_id == TG3_PHY_ID_BCM8002) + tg3_init_bcm8002(tp); + + /* Enable link change event even when serdes polling. */ + tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); + udelay(40); + + current_link_up = 0; + mac_status = tr32(MAC_STATUS); + + if (tg3_flag(tp, HW_AUTONEG)) + current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status); + else + current_link_up = tg3_setup_fiber_by_hand(tp, mac_status); + + tp->napi[0].hw_status->status = + (SD_STATUS_UPDATED | + (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG)); + + for (i = 0; i < 100; i++) { + tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED)); + udelay(5); + if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED | + MAC_STATUS_LNKSTATE_CHANGED)) == 0) + break; + } + + mac_status = tr32(MAC_STATUS); + if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) { + current_link_up = 0; + if (tp->link_config.autoneg == AUTONEG_ENABLE && + tp->serdes_counter == 0) { + tw32_f(MAC_MODE, (tp->mac_mode | + MAC_MODE_SEND_CONFIGS)); + udelay(1); + tw32_f(MAC_MODE, tp->mac_mode); + } + } + + if (current_link_up == 1) { + tp->link_config.active_speed = SPEED_1000; + tp->link_config.active_duplex = DUPLEX_FULL; + tw32(MAC_LED_CTRL, (tp->led_ctrl | + LED_CTRL_LNKLED_OVERRIDE | + LED_CTRL_1000MBPS_ON)); + } else { + tp->link_config.active_speed = SPEED_INVALID; + tp->link_config.active_duplex = DUPLEX_INVALID; + tw32(MAC_LED_CTRL, (tp->led_ctrl | + LED_CTRL_LNKLED_OVERRIDE | + LED_CTRL_TRAFFIC_OVERRIDE)); + } + + if (current_link_up != netif_carrier_ok(tp->dev)) { + if (current_link_up) + netif_carrier_on(tp->dev); + else + netif_carrier_off(tp->dev); + tg3_link_report(tp); + } else { + u32 now_pause_cfg = tp->link_config.active_flowctrl; + if (orig_pause_cfg != now_pause_cfg || + orig_active_speed != tp->link_config.active_speed || + orig_active_duplex != tp->link_config.active_duplex) + tg3_link_report(tp); + } + + return 0; +} + +static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) +{ + int current_link_up, err = 0; + u32 bmsr, bmcr; + u16 current_speed; + u8 current_duplex; + u32 local_adv, remote_adv; + + tp->mac_mode |= 
MAC_MODE_PORT_MODE_GMII; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + tw32(MAC_EVENT, 0); + + tw32_f(MAC_STATUS, + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED | + MAC_STATUS_MI_COMPLETION | + MAC_STATUS_LNKSTATE_CHANGED)); + udelay(40); + + if (force_reset) + tg3_phy_reset(tp); + + current_link_up = 0; + current_speed = SPEED_INVALID; + current_duplex = DUPLEX_INVALID; + + err |= tg3_readphy(tp, MII_BMSR, &bmsr); + err |= tg3_readphy(tp, MII_BMSR, &bmsr); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { + if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) + bmsr |= BMSR_LSTATUS; + else + bmsr &= ~BMSR_LSTATUS; + } + + err |= tg3_readphy(tp, MII_BMCR, &bmcr); + + if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset && + (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { + /* do nothing, just check for link up at the end */ + } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { + u32 adv, new_adv; + + err |= tg3_readphy(tp, MII_ADVERTISE, &adv); + new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF | + ADVERTISE_1000XPAUSE | + ADVERTISE_1000XPSE_ASYM | + ADVERTISE_SLCT); + + new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); + + if (tp->link_config.advertising & ADVERTISED_1000baseT_Half) + new_adv |= ADVERTISE_1000XHALF; + if (tp->link_config.advertising & ADVERTISED_1000baseT_Full) + new_adv |= ADVERTISE_1000XFULL; + + if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) { + tg3_writephy(tp, MII_ADVERTISE, new_adv); + bmcr |= BMCR_ANENABLE | BMCR_ANRESTART; + tg3_writephy(tp, MII_BMCR, bmcr); + + tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); + tp->serdes_counter = SERDES_AN_TIMEOUT_5714S; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + + return err; + } + } else { + u32 new_bmcr; + + bmcr &= ~BMCR_SPEED1000; + new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX); + + if (tp->link_config.duplex == DUPLEX_FULL) + new_bmcr |= BMCR_FULLDPLX; + + if (new_bmcr != bmcr) { + /* BMCR_SPEED1000 is a reserved bit that needs + * to be set on write. 
+ */ + new_bmcr |= BMCR_SPEED1000; + + /* Force a linkdown */ + if (netif_carrier_ok(tp->dev)) { + u32 adv; + + err |= tg3_readphy(tp, MII_ADVERTISE, &adv); + adv &= ~(ADVERTISE_1000XFULL | + ADVERTISE_1000XHALF | + ADVERTISE_SLCT); + tg3_writephy(tp, MII_ADVERTISE, adv); + tg3_writephy(tp, MII_BMCR, bmcr | + BMCR_ANRESTART | + BMCR_ANENABLE); + udelay(10); + netif_carrier_off(tp->dev); + } + tg3_writephy(tp, MII_BMCR, new_bmcr); + bmcr = new_bmcr; + err |= tg3_readphy(tp, MII_BMSR, &bmsr); + err |= tg3_readphy(tp, MII_BMSR, &bmsr); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == + ASIC_REV_5714) { + if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) + bmsr |= BMSR_LSTATUS; + else + bmsr &= ~BMSR_LSTATUS; + } + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + } + } + + if (bmsr & BMSR_LSTATUS) { + current_speed = SPEED_1000; + current_link_up = 1; + if (bmcr & BMCR_FULLDPLX) + current_duplex = DUPLEX_FULL; + else + current_duplex = DUPLEX_HALF; + + local_adv = 0; + remote_adv = 0; + + if (bmcr & BMCR_ANENABLE) { + u32 common; + + err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv); + err |= tg3_readphy(tp, MII_LPA, &remote_adv); + common = local_adv & remote_adv; + if (common & (ADVERTISE_1000XHALF | + ADVERTISE_1000XFULL)) { + if (common & ADVERTISE_1000XFULL) + current_duplex = DUPLEX_FULL; + else + current_duplex = DUPLEX_HALF; + } else if (!tg3_flag(tp, 5780_CLASS)) { + /* Link is up via parallel detect */ + } else { + current_link_up = 0; + } + } + } + + if (current_link_up == 1 && current_duplex == DUPLEX_FULL) + tg3_setup_flow_control(tp, local_adv, remote_adv); + + tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; + if (tp->link_config.active_duplex == DUPLEX_HALF) + tp->mac_mode |= MAC_MODE_HALF_DUPLEX; + + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); + + tp->link_config.active_speed = current_speed; + tp->link_config.active_duplex = current_duplex; + + if (current_link_up != netif_carrier_ok(tp->dev)) { + if (current_link_up) + netif_carrier_on(tp->dev); + else { + netif_carrier_off(tp->dev); + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + } + tg3_link_report(tp); + } + return err; +} + +static void tg3_serdes_parallel_detect(struct tg3 *tp) +{ + if (tp->serdes_counter) { + /* Give autoneg time to complete. */ + tp->serdes_counter--; + return; + } + + if (!netif_carrier_ok(tp->dev) && + (tp->link_config.autoneg == AUTONEG_ENABLE)) { + u32 bmcr; + + tg3_readphy(tp, MII_BMCR, &bmcr); + if (bmcr & BMCR_ANENABLE) { + u32 phy1, phy2; + + /* Select shadow register 0x1f */ + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00); + tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1); + + /* Select expansion interrupt status register */ + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + MII_TG3_DSP_EXP1_INT_STAT); + tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); + tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); + + if ((phy1 & 0x10) && !(phy2 & 0x20)) { + /* We have signal detect and not receiving + * config code words, link is up by parallel + * detection. 
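+ * Turn autoneg off and force 1000/full in BMCR to match what + * parallel detect observed.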
+ */ + + bmcr &= ~BMCR_ANENABLE; + bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX; + tg3_writephy(tp, MII_BMCR, bmcr); + tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT; + } + } + } else if (netif_carrier_ok(tp->dev) && + (tp->link_config.autoneg == AUTONEG_ENABLE) && + (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { + u32 phy2; + + /* Select expansion interrupt status register */ + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + MII_TG3_DSP_EXP1_INT_STAT); + tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); + if (phy2 & 0x20) { + u32 bmcr; + + /* Config code words received, turn on autoneg. */ + tg3_readphy(tp, MII_BMCR, &bmcr); + tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE); + + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + + } + } +} + +static int tg3_setup_phy(struct tg3 *tp, int force_reset) +{ + u32 val; + int err; + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) + err = tg3_setup_fiber_phy(tp, force_reset); + else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) + err = tg3_setup_fiber_mii_phy(tp, force_reset); + else + err = tg3_setup_copper_phy(tp, force_reset); + + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) { + u32 scale; + + val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK; + if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5) + scale = 65; + else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25) + scale = 6; + else + scale = 12; + + val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK; + val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT); + tw32(GRC_MISC_CFG, val); + } + + val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | + (6 << TX_LENGTHS_IPG_SHIFT); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + val |= tr32(MAC_TX_LENGTHS) & + (TX_LENGTHS_JMB_FRM_LEN_MSK | + TX_LENGTHS_CNT_DWN_VAL_MSK); + + if (tp->link_config.active_speed == SPEED_1000 && + tp->link_config.active_duplex == DUPLEX_HALF) + tw32(MAC_TX_LENGTHS, val | + (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)); + else + tw32(MAC_TX_LENGTHS, val | + (32 << TX_LENGTHS_SLOT_TIME_SHIFT)); + + if (!tg3_flag(tp, 5705_PLUS)) { + if (netif_carrier_ok(tp->dev)) { + tw32(HOSTCC_STAT_COAL_TICKS, + tp->coal.stats_block_coalesce_usecs); + } else { + tw32(HOSTCC_STAT_COAL_TICKS, 0); + } + } + + if (tg3_flag(tp, ASPM_WORKAROUND)) { + val = tr32(PCIE_PWR_MGMT_THRESH); + if (!netif_carrier_ok(tp->dev)) + val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) | + tp->pwrmgmt_thresh; + else + val |= PCIE_PWR_MGMT_L1_THRESH_MSK; + tw32(PCIE_PWR_MGMT_THRESH, val); + } + + return err; +} + +static inline int tg3_irq_sync(struct tg3 *tp) +{ + return tp->irq_sync; +} + +static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len) +{ + int i; + + dst = (u32 *)((u8 *)dst + off); + for (i = 0; i < len; i += sizeof(u32)) + *dst++ = tr32(off + i); +} + +static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs) +{ + tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0); + tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200); + tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0); + tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0); + tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04); + tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80); + tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48); + tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04); + tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20); + tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c); + tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c); + tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c); + tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44); + tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04); + tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20); + tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14); 
+ tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08); + tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08); + tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100); + + if (tg3_flag(tp, SUPPORT_MSIX)) + tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180); + + tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10); + tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58); + tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08); + tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08); + tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04); + tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04); + tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04); + tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04); + + if (!tg3_flag(tp, 5705_PLUS)) { + tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04); + tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04); + tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04); + } + + tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110); + tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120); + tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c); + tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04); + tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c); + + if (tg3_flag(tp, NVRAM)) + tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24); +} + +static void tg3_dump_state(struct tg3 *tp) +{ + int i; + u32 *regs; + + regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC); + if (!regs) { + netdev_err(tp->dev, "Failed allocating register dump buffer\n"); + return; + } + + if (tg3_flag(tp, PCI_EXPRESS)) { + /* Read up to but not including private PCI registers */ + for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32)) + regs[i / sizeof(u32)] = tr32(i); + } else + tg3_dump_legacy_regs(tp, regs); + + for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) { + if (!regs[i + 0] && !regs[i + 1] && + !regs[i + 2] && !regs[i + 3]) + continue; + + netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", + i * 4, + regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]); + } + + kfree(regs); + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + /* SW status block */ + netdev_err(tp->dev, + "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n", + i, + tnapi->hw_status->status, + tnapi->hw_status->status_tag, + tnapi->hw_status->rx_jumbo_consumer, + tnapi->hw_status->rx_consumer, + tnapi->hw_status->rx_mini_consumer, + tnapi->hw_status->idx[0].rx_producer, + tnapi->hw_status->idx[0].tx_consumer); + + netdev_err(tp->dev, + "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n", + i, + tnapi->last_tag, tnapi->last_irq_tag, + tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending, + tnapi->rx_rcb_ptr, + tnapi->prodring.rx_std_prod_idx, + tnapi->prodring.rx_std_cons_idx, + tnapi->prodring.rx_jmb_prod_idx, + tnapi->prodring.rx_jmb_cons_idx); + } +} + +/* This is called whenever we suspect that the system chipset is re- + * ordering the sequence of MMIO to the tx send mailbox. The symptom + * is bogus tx completions. We try to recover by setting the + * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later + * in the workqueue. + */ +static void tg3_tx_recover(struct tg3 *tp) +{ + BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) || + tp->write32_tx_mbox == tg3_write_indirect_mbox); + + netdev_warn(tp->dev, + "The system may be re-ordering memory-mapped I/O " + "cycles to the network device, attempting to recover. 
" + "Please report the problem to the driver maintainer " + "and include system chipset information.\n"); + + spin_lock(&tp->lock); + tg3_flag_set(tp, TX_RECOVERY_PENDING); + spin_unlock(&tp->lock); +} + +static inline u32 tg3_tx_avail(struct tg3_napi *tnapi) +{ + /* Tell compiler to fetch tx indices from memory. */ + barrier(); + return tnapi->tx_pending - + ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1)); +} + +/* Tigon3 never reports partial packet sends. So we do not + * need special logic to handle SKBs that have not had all + * of their frags sent yet, like SunGEM does. + */ +static void tg3_tx(struct tg3_napi *tnapi) +{ + struct tg3 *tp = tnapi->tp; + u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer; + u32 sw_idx = tnapi->tx_cons; + struct netdev_queue *txq; + int index = tnapi - tp->napi; + + if (tg3_flag(tp, ENABLE_TSS)) + index--; + + txq = netdev_get_tx_queue(tp->dev, index); + + while (sw_idx != hw_idx) { + struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx]; + struct sk_buff *skb = ri->skb; + int i, tx_bug = 0; + + if (unlikely(skb == NULL)) { + tg3_tx_recover(tp); + return; + } + + pci_unmap_single(tp->pdev, + dma_unmap_addr(ri, mapping), + skb_headlen(skb), + PCI_DMA_TODEVICE); + + ri->skb = NULL; + + while (ri->fragmented) { + ri->fragmented = false; + sw_idx = NEXT_TX(sw_idx); + ri = &tnapi->tx_buffers[sw_idx]; + } + + sw_idx = NEXT_TX(sw_idx); + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + ri = &tnapi->tx_buffers[sw_idx]; + if (unlikely(ri->skb != NULL || sw_idx == hw_idx)) + tx_bug = 1; + + pci_unmap_page(tp->pdev, + dma_unmap_addr(ri, mapping), + skb_frag_size(&skb_shinfo(skb)->frags[i]), + PCI_DMA_TODEVICE); + + while (ri->fragmented) { + ri->fragmented = false; + sw_idx = NEXT_TX(sw_idx); + ri = &tnapi->tx_buffers[sw_idx]; + } + + sw_idx = NEXT_TX(sw_idx); + } + + dev_kfree_skb(skb); + + if (unlikely(tx_bug)) { + tg3_tx_recover(tp); + return; + } + } + + tnapi->tx_cons = sw_idx; + + /* Need to make the tx_cons update visible to tg3_start_xmit() + * before checking for netif_queue_stopped(). Without the + * memory barrier, there is a small possibility that tg3_start_xmit() + * will miss it and cause the queue to be stopped forever. + */ + smp_mb(); + + if (unlikely(netif_tx_queue_stopped(txq) && + (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) { + __netif_tx_lock(txq, smp_processor_id()); + if (netif_tx_queue_stopped(txq) && + (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))) + netif_tx_wake_queue(txq); + __netif_tx_unlock(txq); + } +} + +static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz) +{ + if (!ri->skb) + return; + + pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping), + map_sz, PCI_DMA_FROMDEVICE); + dev_kfree_skb_any(ri->skb); + ri->skb = NULL; +} + +/* Returns size of skb allocated or < 0 on error. + * + * We only need to fill in the address because the other members + * of the RX descriptor are invariant, see tg3_init_rings. + * + * Note the purposeful assymetry of cpu vs. chip accesses. For + * posting buffers we only dirty the first cache line of the RX + * descriptor (containing the address). Whereas for the RX status + * buffers the cpu only reads the last cacheline of the RX descriptor + * (to fetch the error flags, vlan tag, checksum, and opaque cookie). 
+ */ +static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, + u32 opaque_key, u32 dest_idx_unmasked) +{ + struct tg3_rx_buffer_desc *desc; + struct ring_info *map; + struct sk_buff *skb; + dma_addr_t mapping; + int skb_size, dest_idx; + + switch (opaque_key) { + case RXD_OPAQUE_RING_STD: + dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; + desc = &tpr->rx_std[dest_idx]; + map = &tpr->rx_std_buffers[dest_idx]; + skb_size = tp->rx_pkt_map_sz; + break; + + case RXD_OPAQUE_RING_JUMBO: + dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; + desc = &tpr->rx_jmb[dest_idx].std; + map = &tpr->rx_jmb_buffers[dest_idx]; + skb_size = TG3_RX_JMB_MAP_SZ; + break; + + default: + return -EINVAL; + } + + /* Do not overwrite any of the map or rp information + * until we are sure we can commit to a new buffer. + * + * Callers depend upon this behavior and assume that + * we leave everything unchanged if we fail. + */ + skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp)); + if (skb == NULL) + return -ENOMEM; + + skb_reserve(skb, TG3_RX_OFFSET(tp)); + + mapping = pci_map_single(tp->pdev, skb->data, skb_size, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(tp->pdev, mapping)) { + dev_kfree_skb(skb); + return -EIO; + } + + map->skb = skb; + dma_unmap_addr_set(map, mapping, mapping); + + desc->addr_hi = ((u64)mapping >> 32); + desc->addr_lo = ((u64)mapping & 0xffffffff); + + return skb_size; +} + +/* We only need to move over in the address because the other + * members of the RX descriptor are invariant. See notes above + * tg3_alloc_rx_skb for full details. + */ +static void tg3_recycle_rx(struct tg3_napi *tnapi, + struct tg3_rx_prodring_set *dpr, + u32 opaque_key, int src_idx, + u32 dest_idx_unmasked) +{ + struct tg3 *tp = tnapi->tp; + struct tg3_rx_buffer_desc *src_desc, *dest_desc; + struct ring_info *src_map, *dest_map; + struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring; + int dest_idx; + + switch (opaque_key) { + case RXD_OPAQUE_RING_STD: + dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; + dest_desc = &dpr->rx_std[dest_idx]; + dest_map = &dpr->rx_std_buffers[dest_idx]; + src_desc = &spr->rx_std[src_idx]; + src_map = &spr->rx_std_buffers[src_idx]; + break; + + case RXD_OPAQUE_RING_JUMBO: + dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; + dest_desc = &dpr->rx_jmb[dest_idx].std; + dest_map = &dpr->rx_jmb_buffers[dest_idx]; + src_desc = &spr->rx_jmb[src_idx].std; + src_map = &spr->rx_jmb_buffers[src_idx]; + break; + + default: + return; + } + + dest_map->skb = src_map->skb; + dma_unmap_addr_set(dest_map, mapping, + dma_unmap_addr(src_map, mapping)); + dest_desc->addr_hi = src_desc->addr_hi; + dest_desc->addr_lo = src_desc->addr_lo; + + /* Ensure that the update to the skb happens after the physical + * addresses have been transferred to the new BD location. + */ + smp_wmb(); + + src_map->skb = NULL; +} + +/* The RX ring scheme is composed of multiple rings which post fresh + * buffers to the chip, and one special ring the chip uses to report + * status back to the host. + * + * The special ring reports the status of received packets to the + * host. The chip does not write into the original descriptor the + * RX buffer was obtained from. The chip simply takes the original + * descriptor as provided by the host, updates the status and length + * field, then writes this into the next status ring entry. + * + * Each ring the host uses to post buffers to the chip is described + * by a TG3_BDINFO entry in the chips SRAM area. 
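+ * (Each TG3_BDINFO holds the ring's host DMA address, a maxlen/flags
+ * word, and the ring's NIC-side address.)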
When a packet arrives, + * it is first placed into the on-chip ram. When the packet's length + * is known, it walks down the TG3_BDINFO entries to select the ring. + * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO + * which is within the range of the new packet's length is chosen. + * + * The "separate ring for rx status" scheme may sound queer, but it makes + * sense from a cache coherency perspective. If only the host writes + * to the buffer post rings, and only the chip writes to the rx status + * rings, then cache lines never move beyond shared-modified state. + * If both the host and chip were to write into the same ring, cache line + * eviction could occur since both entities want it in an exclusive state. + */ +static int tg3_rx(struct tg3_napi *tnapi, int budget) +{ + struct tg3 *tp = tnapi->tp; + u32 work_mask, rx_std_posted = 0; + u32 std_prod_idx, jmb_prod_idx; + u32 sw_idx = tnapi->rx_rcb_ptr; + u16 hw_idx; + int received; + struct tg3_rx_prodring_set *tpr = &tnapi->prodring; + + hw_idx = *(tnapi->rx_rcb_prod_idx); + /* + * We need to order the read of hw_idx and the read of + * the opaque cookie. + */ + rmb(); + work_mask = 0; + received = 0; + std_prod_idx = tpr->rx_std_prod_idx; + jmb_prod_idx = tpr->rx_jmb_prod_idx; + while (sw_idx != hw_idx && budget > 0) { + struct ring_info *ri; + struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx]; + unsigned int len; + struct sk_buff *skb; + dma_addr_t dma_addr; + u32 opaque_key, desc_idx, *post_ptr; + + desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; + opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; + if (opaque_key == RXD_OPAQUE_RING_STD) { + ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx]; + dma_addr = dma_unmap_addr(ri, mapping); + skb = ri->skb; + post_ptr = &std_prod_idx; + rx_std_posted++; + } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { + ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx]; + dma_addr = dma_unmap_addr(ri, mapping); + skb = ri->skb; + post_ptr = &jmb_prod_idx; + } else + goto next_pkt_nopost; + + work_mask |= opaque_key; + + if ((desc->err_vlan & RXD_ERR_MASK) != 0 && + (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) { + drop_it: + tg3_recycle_rx(tnapi, tpr, opaque_key, + desc_idx, *post_ptr); + drop_it_no_recycle: + /* Other statistics kept track of by card. */ + tp->rx_dropped++; + goto next_pkt; + } + + len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - + ETH_FCS_LEN; + + if (len > TG3_RX_COPY_THRESH(tp)) { + int skb_size; + + skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key, + *post_ptr); + if (skb_size < 0) + goto drop_it; + + pci_unmap_single(tp->pdev, dma_addr, skb_size, + PCI_DMA_FROMDEVICE); + + /* Ensure that the update to the skb happens + * after the usage of the old DMA mapping. + */ + smp_wmb(); + + ri->skb = NULL; + + skb_put(skb, len); + } else { + struct sk_buff *copy_skb; + + tg3_recycle_rx(tnapi, tpr, opaque_key, + desc_idx, *post_ptr); + + copy_skb = netdev_alloc_skb(tp->dev, len + + TG3_RAW_IP_ALIGN); + if (copy_skb == NULL) + goto drop_it_no_recycle; + + skb_reserve(copy_skb, TG3_RAW_IP_ALIGN); + skb_put(copy_skb, len); + pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); + skb_copy_from_linear_data(skb, copy_skb->data, len); + pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); + + /* We'll reuse the original ring buffer. 
*/ + skb = copy_skb; + } + + if ((tp->dev->features & NETIF_F_RXCSUM) && + (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && + (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) + >> RXD_TCPCSUM_SHIFT) == 0xffff)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); + + skb->protocol = eth_type_trans(skb, tp->dev); + + if (len > (tp->dev->mtu + ETH_HLEN) && + skb->protocol != htons(ETH_P_8021Q)) { + dev_kfree_skb(skb); + goto drop_it_no_recycle; + } + + if (desc->type_flags & RXD_FLAG_VLAN && + !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) + __vlan_hwaccel_put_tag(skb, + desc->err_vlan & RXD_VLAN_MASK); + + napi_gro_receive(&tnapi->napi, skb); + + received++; + budget--; + +next_pkt: + (*post_ptr)++; + + if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { + tpr->rx_std_prod_idx = std_prod_idx & + tp->rx_std_ring_mask; + tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, + tpr->rx_std_prod_idx); + work_mask &= ~RXD_OPAQUE_RING_STD; + rx_std_posted = 0; + } +next_pkt_nopost: + sw_idx++; + sw_idx &= tp->rx_ret_ring_mask; + + /* Refresh hw_idx to see if there is new work */ + if (sw_idx == hw_idx) { + hw_idx = *(tnapi->rx_rcb_prod_idx); + rmb(); + } + } + + /* ACK the status ring. */ + tnapi->rx_rcb_ptr = sw_idx; + tw32_rx_mbox(tnapi->consmbox, sw_idx); + + /* Refill RX ring(s). */ + if (!tg3_flag(tp, ENABLE_RSS)) { + if (work_mask & RXD_OPAQUE_RING_STD) { + tpr->rx_std_prod_idx = std_prod_idx & + tp->rx_std_ring_mask; + tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, + tpr->rx_std_prod_idx); + } + if (work_mask & RXD_OPAQUE_RING_JUMBO) { + tpr->rx_jmb_prod_idx = jmb_prod_idx & + tp->rx_jmb_ring_mask; + tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, + tpr->rx_jmb_prod_idx); + } + mmiowb(); + } else if (work_mask) { + /* rx_std_buffers[] and rx_jmb_buffers[] entries must be + * updated before the producer indices can be updated. + */ + smp_wmb(); + + tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask; + tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask; + + if (tnapi != &tp->napi[1]) + napi_schedule(&tp->napi[1].napi); + } + + return received; +} + +static void tg3_poll_link(struct tg3 *tp) +{ + /* handle link change and other phy events */ + if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) { + struct tg3_hw_status *sblk = tp->napi[0].hw_status; + + if (sblk->status & SD_STATUS_LINK_CHG) { + sblk->status = SD_STATUS_UPDATED | + (sblk->status & ~SD_STATUS_LINK_CHG); + spin_lock(&tp->lock); + if (tg3_flag(tp, USE_PHYLIB)) { + tw32_f(MAC_STATUS, + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED | + MAC_STATUS_MI_COMPLETION | + MAC_STATUS_LNKSTATE_CHANGED)); + udelay(40); + } else + tg3_setup_phy(tp, 0); + spin_unlock(&tp->lock); + } + } +} + +static int tg3_rx_prodring_xfer(struct tg3 *tp, + struct tg3_rx_prodring_set *dpr, + struct tg3_rx_prodring_set *spr) +{ + u32 si, di, cpycnt, src_prod_idx; + int i, err = 0; + + while (1) { + src_prod_idx = spr->rx_std_prod_idx; + + /* Make sure updates to the rx_std_buffers[] entries and the + * standard producer index are seen in the correct order. 
+ */ + smp_rmb(); + + if (spr->rx_std_cons_idx == src_prod_idx) + break; + + if (spr->rx_std_cons_idx < src_prod_idx) + cpycnt = src_prod_idx - spr->rx_std_cons_idx; + else + cpycnt = tp->rx_std_ring_mask + 1 - + spr->rx_std_cons_idx; + + cpycnt = min(cpycnt, + tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx); + + si = spr->rx_std_cons_idx; + di = dpr->rx_std_prod_idx; + + for (i = di; i < di + cpycnt; i++) { + if (dpr->rx_std_buffers[i].skb) { + cpycnt = i - di; + err = -ENOSPC; + break; + } + } + + if (!cpycnt) + break; + + /* Ensure that updates to the rx_std_buffers ring and the + * shadowed hardware producer ring from tg3_recycle_skb() are + * ordered correctly WRT the skb check above. + */ + smp_rmb(); + + memcpy(&dpr->rx_std_buffers[di], + &spr->rx_std_buffers[si], + cpycnt * sizeof(struct ring_info)); + + for (i = 0; i < cpycnt; i++, di++, si++) { + struct tg3_rx_buffer_desc *sbd, *dbd; + sbd = &spr->rx_std[si]; + dbd = &dpr->rx_std[di]; + dbd->addr_hi = sbd->addr_hi; + dbd->addr_lo = sbd->addr_lo; + } + + spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) & + tp->rx_std_ring_mask; + dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) & + tp->rx_std_ring_mask; + } + + while (1) { + src_prod_idx = spr->rx_jmb_prod_idx; + + /* Make sure updates to the rx_jmb_buffers[] entries and + * the jumbo producer index are seen in the correct order. + */ + smp_rmb(); + + if (spr->rx_jmb_cons_idx == src_prod_idx) + break; + + if (spr->rx_jmb_cons_idx < src_prod_idx) + cpycnt = src_prod_idx - spr->rx_jmb_cons_idx; + else + cpycnt = tp->rx_jmb_ring_mask + 1 - + spr->rx_jmb_cons_idx; + + cpycnt = min(cpycnt, + tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx); + + si = spr->rx_jmb_cons_idx; + di = dpr->rx_jmb_prod_idx; + + for (i = di; i < di + cpycnt; i++) { + if (dpr->rx_jmb_buffers[i].skb) { + cpycnt = i - di; + err = -ENOSPC; + break; + } + } + + if (!cpycnt) + break; + + /* Ensure that updates to the rx_jmb_buffers ring and the + * shadowed hardware producer ring from tg3_recycle_skb() are + * ordered correctly WRT the skb check above. + */ + smp_rmb(); + + memcpy(&dpr->rx_jmb_buffers[di], + &spr->rx_jmb_buffers[si], + cpycnt * sizeof(struct ring_info)); + + for (i = 0; i < cpycnt; i++, di++, si++) { + struct tg3_rx_buffer_desc *sbd, *dbd; + sbd = &spr->rx_jmb[si].std; + dbd = &dpr->rx_jmb[di].std; + dbd->addr_hi = sbd->addr_hi; + dbd->addr_lo = sbd->addr_lo; + } + + spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) & + tp->rx_jmb_ring_mask; + dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) & + tp->rx_jmb_ring_mask; + } + + return err; +} + +static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) +{ + struct tg3 *tp = tnapi->tp; + + /* run TX completion thread */ + if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { + tg3_tx(tnapi); + if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) + return work_done; + } + + /* run RX thread, within the bounds set by NAPI. 
+ * All RX "locking" is done by ensuring outside + * code synchronizes with tg3->napi.poll() + */ + if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) + work_done += tg3_rx(tnapi, budget - work_done); + + if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) { + struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring; + int i, err = 0; + u32 std_prod_idx = dpr->rx_std_prod_idx; + u32 jmb_prod_idx = dpr->rx_jmb_prod_idx; + + for (i = 1; i < tp->irq_cnt; i++) + err |= tg3_rx_prodring_xfer(tp, dpr, + &tp->napi[i].prodring); + + wmb(); + + if (std_prod_idx != dpr->rx_std_prod_idx) + tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, + dpr->rx_std_prod_idx); + + if (jmb_prod_idx != dpr->rx_jmb_prod_idx) + tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, + dpr->rx_jmb_prod_idx); + + mmiowb(); + + if (err) + tw32_f(HOSTCC_MODE, tp->coal_now); + } + + return work_done; +} + +static int tg3_poll_msix(struct napi_struct *napi, int budget) +{ + struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); + struct tg3 *tp = tnapi->tp; + int work_done = 0; + struct tg3_hw_status *sblk = tnapi->hw_status; + + while (1) { + work_done = tg3_poll_work(tnapi, work_done, budget); + + if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) + goto tx_recovery; + + if (unlikely(work_done >= budget)) + break; + + /* tp->last_tag is used in tg3_int_reenable() below + * to tell the hw how much work has been processed, + * so we must read it before checking for more work. + */ + tnapi->last_tag = sblk->status_tag; + tnapi->last_irq_tag = tnapi->last_tag; + rmb(); + + /* check for RX/TX work to do */ + if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons && + *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) { + napi_complete(napi); + /* Reenable interrupts. */ + tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); + mmiowb(); + break; + } + } + + return work_done; + +tx_recovery: + /* work_done is guaranteed to be less than budget. */ + napi_complete(napi); + schedule_work(&tp->reset_task); + return work_done; +} + +static void tg3_process_error(struct tg3 *tp) +{ + u32 val; + bool real_error = false; + + if (tg3_flag(tp, ERROR_PROCESSED)) + return; + + /* Check Flow Attention register */ + val = tr32(HOSTCC_FLOW_ATTN); + if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) { + netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n"); + real_error = true; + } + + if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) { + netdev_err(tp->dev, "MSI Status error. Resetting chip.\n"); + real_error = true; + } + + if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) { + netdev_err(tp->dev, "DMA Status error. Resetting chip.\n"); + real_error = true; + } + + if (!real_error) + return; + + tg3_dump_state(tp); + + tg3_flag_set(tp, ERROR_PROCESSED); + schedule_work(&tp->reset_task); +} + +static int tg3_poll(struct napi_struct *napi, int budget) +{ + struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); + struct tg3 *tp = tnapi->tp; + int work_done = 0; + struct tg3_hw_status *sblk = tnapi->hw_status; + + while (1) { + if (sblk->status & SD_STATUS_ERROR) + tg3_process_error(tp); + + tg3_poll_link(tp); + + work_done = tg3_poll_work(tnapi, work_done, budget); + + if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) + goto tx_recovery; + + if (unlikely(work_done >= budget)) + break; + + if (tg3_flag(tp, TAGGED_STATUS)) { + /* tp->last_tag is used in tg3_int_reenable() below + * to tell the hw how much work has been processed, + * so we must read it before checking for more work. 
+ */ + tnapi->last_tag = sblk->status_tag; + tnapi->last_irq_tag = tnapi->last_tag; + rmb(); + } else + sblk->status &= ~SD_STATUS_UPDATED; + + if (likely(!tg3_has_work(tnapi))) { + napi_complete(napi); + tg3_int_reenable(tnapi); + break; + } + } + + return work_done; + +tx_recovery: + /* work_done is guaranteed to be less than budget. */ + napi_complete(napi); + schedule_work(&tp->reset_task); + return work_done; +} + +static void tg3_napi_disable(struct tg3 *tp) +{ + int i; + + for (i = tp->irq_cnt - 1; i >= 0; i--) + napi_disable(&tp->napi[i].napi); +} + +static void tg3_napi_enable(struct tg3 *tp) +{ + int i; + + for (i = 0; i < tp->irq_cnt; i++) + napi_enable(&tp->napi[i].napi); +} + +static void tg3_napi_init(struct tg3 *tp) +{ + int i; + + netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64); + for (i = 1; i < tp->irq_cnt; i++) + netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64); +} + +static void tg3_napi_fini(struct tg3 *tp) +{ + int i; + + for (i = 0; i < tp->irq_cnt; i++) + netif_napi_del(&tp->napi[i].napi); +} + +static inline void tg3_netif_stop(struct tg3 *tp) +{ + tp->dev->trans_start = jiffies; /* prevent tx timeout */ + tg3_napi_disable(tp); + netif_tx_disable(tp->dev); +} + +static inline void tg3_netif_start(struct tg3 *tp) +{ + /* NOTE: unconditional netif_tx_wake_all_queues is only + * appropriate so long as all callers are assured to + * have free tx slots (such as after tg3_init_hw) + */ + netif_tx_wake_all_queues(tp->dev); + + tg3_napi_enable(tp); + tp->napi[0].hw_status->status |= SD_STATUS_UPDATED; + tg3_enable_ints(tp); +} + +static void tg3_irq_quiesce(struct tg3 *tp) +{ + int i; + + BUG_ON(tp->irq_sync); + + tp->irq_sync = 1; + smp_mb(); + + for (i = 0; i < tp->irq_cnt; i++) + synchronize_irq(tp->napi[i].irq_vec); +} + +/* Fully shutdown all tg3 driver activity elsewhere in the system. + * If irq_sync is non-zero, then the IRQ handler must be synchronized + * with as well. Most of the time, this is not necessary except when + * shutting down the device. + */ +static inline void tg3_full_lock(struct tg3 *tp, int irq_sync) +{ + spin_lock_bh(&tp->lock); + if (irq_sync) + tg3_irq_quiesce(tp); +} + +static inline void tg3_full_unlock(struct tg3 *tp) +{ + spin_unlock_bh(&tp->lock); +} + +/* One-shot MSI handler - Chip automatically disables interrupt + * after sending MSI so driver doesn't have to do it. + */ +static irqreturn_t tg3_msi_1shot(int irq, void *dev_id) +{ + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + + prefetch(tnapi->hw_status); + if (tnapi->rx_rcb) + prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); + + if (likely(!tg3_irq_sync(tp))) + napi_schedule(&tnapi->napi); + + return IRQ_HANDLED; +} + +/* MSI ISR - No need to check for interrupt sharing and no need to + * flush status block and interrupt mailbox. PCI ordering rules + * guarantee that MSI will arrive after the status block. + */ +static irqreturn_t tg3_msi(int irq, void *dev_id) +{ + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + + prefetch(tnapi->hw_status); + if (tnapi->rx_rcb) + prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); + /* + * Writing any value to intr-mbox-0 clears PCI INTA# and + * chip-internal interrupt pending events. + * Writing non-zero to intr-mbox-0 additional tells the + * NIC to stop sending us irqs, engaging "in-intr-handler" + * event coalescing. 
+ */ + tw32_mailbox(tnapi->int_mbox, 0x00000001); + if (likely(!tg3_irq_sync(tp))) + napi_schedule(&tnapi->napi); + + return IRQ_RETVAL(1); +} + +static irqreturn_t tg3_interrupt(int irq, void *dev_id) +{ + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + struct tg3_hw_status *sblk = tnapi->hw_status; + unsigned int handled = 1; + + /* In INTx mode, it is possible for the interrupt to arrive at + * the CPU before the status block posted prior to the interrupt. + * Reading the PCI State register will confirm whether the + * interrupt is ours and will flush the status block. + */ + if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) { + if (tg3_flag(tp, CHIP_RESETTING) || + (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { + handled = 0; + goto out; + } + } + + /* + * Writing any value to intr-mbox-0 clears PCI INTA# and + * chip-internal interrupt pending events. + * Writing non-zero to intr-mbox-0 additional tells the + * NIC to stop sending us irqs, engaging "in-intr-handler" + * event coalescing. + * + * Flush the mailbox to de-assert the IRQ immediately to prevent + * spurious interrupts. The flush impacts performance but + * excessive spurious interrupts can be worse in some cases. + */ + tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); + if (tg3_irq_sync(tp)) + goto out; + sblk->status &= ~SD_STATUS_UPDATED; + if (likely(tg3_has_work(tnapi))) { + prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); + napi_schedule(&tnapi->napi); + } else { + /* No work, shared interrupt perhaps? re-enable + * interrupts, and flush that PCI write + */ + tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, + 0x00000000); + } +out: + return IRQ_RETVAL(handled); +} + +static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) +{ + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + struct tg3_hw_status *sblk = tnapi->hw_status; + unsigned int handled = 1; + + /* In INTx mode, it is possible for the interrupt to arrive at + * the CPU before the status block posted prior to the interrupt. + * Reading the PCI State register will confirm whether the + * interrupt is ours and will flush the status block. + */ + if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) { + if (tg3_flag(tp, CHIP_RESETTING) || + (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { + handled = 0; + goto out; + } + } + + /* + * writing any value to intr-mbox-0 clears PCI INTA# and + * chip-internal interrupt pending events. + * writing non-zero to intr-mbox-0 additional tells the + * NIC to stop sending us irqs, engaging "in-intr-handler" + * event coalescing. + * + * Flush the mailbox to de-assert the IRQ immediately to prevent + * spurious interrupts. The flush impacts performance but + * excessive spurious interrupts can be worse in some cases. + */ + tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); + + /* + * In a shared interrupt configuration, sometimes other devices' + * interrupts will scream. We record the current status tag here + * so that the above check can report that the screaming interrupts + * are unhandled. Eventually they will be silenced. 
+ */ + tnapi->last_irq_tag = sblk->status_tag; + + if (tg3_irq_sync(tp)) + goto out; + + prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); + + napi_schedule(&tnapi->napi); + +out: + return IRQ_RETVAL(handled); +} + +/* ISR for interrupt test */ +static irqreturn_t tg3_test_isr(int irq, void *dev_id) +{ + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + struct tg3_hw_status *sblk = tnapi->hw_status; + + if ((sblk->status & SD_STATUS_UPDATED) || + !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { + tg3_disable_ints(tp); + return IRQ_RETVAL(1); + } + return IRQ_RETVAL(0); +} + +static int tg3_init_hw(struct tg3 *, int); +static int tg3_halt(struct tg3 *, int, int); + +/* Restart hardware after configuration changes, self-test, etc. + * Invoked with tp->lock held. + */ +static int tg3_restart_hw(struct tg3 *tp, int reset_phy) + __releases(tp->lock) + __acquires(tp->lock) +{ + int err; + + err = tg3_init_hw(tp, reset_phy); + if (err) { + netdev_err(tp->dev, + "Failed to re-initialize device, aborting\n"); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + tg3_full_unlock(tp); + del_timer_sync(&tp->timer); + tp->irq_sync = 0; + tg3_napi_enable(tp); + dev_close(tp->dev); + tg3_full_lock(tp, 0); + } + return err; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void tg3_poll_controller(struct net_device *dev) +{ + int i; + struct tg3 *tp = netdev_priv(dev); + + for (i = 0; i < tp->irq_cnt; i++) + tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]); +} +#endif + +static void tg3_reset_task(struct work_struct *work) +{ + struct tg3 *tp = container_of(work, struct tg3, reset_task); + int err; + unsigned int restart_timer; + + tg3_full_lock(tp, 0); + + if (!netif_running(tp->dev)) { + tg3_full_unlock(tp); + return; + } + + tg3_full_unlock(tp); + + tg3_phy_stop(tp); + + tg3_netif_stop(tp); + + tg3_full_lock(tp, 1); + + restart_timer = tg3_flag(tp, RESTART_TIMER); + tg3_flag_clear(tp, RESTART_TIMER); + + if (tg3_flag(tp, TX_RECOVERY_PENDING)) { + tp->write32_tx_mbox = tg3_write32_tx_mbox; + tp->write32_rx_mbox = tg3_write_flush_reg32; + tg3_flag_set(tp, MBOX_WRITE_REORDER); + tg3_flag_clear(tp, TX_RECOVERY_PENDING); + } + + tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); + err = tg3_init_hw(tp, 1); + if (err) + goto out; + + tg3_netif_start(tp); + + if (restart_timer) + mod_timer(&tp->timer, jiffies + 1); + +out: + tg3_full_unlock(tp); + + if (!err) + tg3_phy_start(tp); +} + +static void tg3_tx_timeout(struct net_device *dev) +{ + struct tg3 *tp = netdev_priv(dev); + + if (netif_msg_tx_err(tp)) { + netdev_err(dev, "transmit timed out, resetting\n"); + tg3_dump_state(tp); + } + + schedule_work(&tp->reset_task); +} + +/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */ +static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) +{ + u32 base = (u32) mapping & 0xffffffff; + + return (base > 0xffffdcc0) && (base + len + 8 < base); +} + +/* Test for DMA addresses > 40-bit */ +static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, + int len) +{ +#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64) + if (tg3_flag(tp, 40BIT_DMA_BUG)) + return ((u64) mapping + len) > DMA_BIT_MASK(40); + return 0; +#else + return 0; +#endif +} + +static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd, + dma_addr_t mapping, u32 len, u32 flags, + u32 mss, u32 vlan) +{ + txbd->addr_hi = ((u64) mapping >> 32); + txbd->addr_lo = ((u64) mapping & 0xffffffff); + txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff); + txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << 
TXD_VLAN_TAG_SHIFT); +} + +static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget, + dma_addr_t map, u32 len, u32 flags, + u32 mss, u32 vlan) +{ + struct tg3 *tp = tnapi->tp; + bool hwbug = false; + + if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8) + hwbug = 1; + + if (tg3_4g_overflow_test(map, len)) + hwbug = 1; + + if (tg3_40bit_overflow_test(tp, map, len)) + hwbug = 1; + + if (tg3_flag(tp, 4K_FIFO_LIMIT)) { + u32 tmp_flag = flags & ~TXD_FLAG_END; + while (len > TG3_TX_BD_DMA_MAX) { + u32 frag_len = TG3_TX_BD_DMA_MAX; + len -= TG3_TX_BD_DMA_MAX; + + if (len) { + tnapi->tx_buffers[*entry].fragmented = true; + /* Avoid the 8byte DMA problem */ + if (len <= 8) { + len += TG3_TX_BD_DMA_MAX / 2; + frag_len = TG3_TX_BD_DMA_MAX / 2; + } + } else + tmp_flag = flags; + + if (*budget) { + tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, + frag_len, tmp_flag, mss, vlan); + (*budget)--; + *entry = NEXT_TX(*entry); + } else { + hwbug = 1; + break; + } + + map += frag_len; + } + + if (len) { + if (*budget) { + tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, + len, flags, mss, vlan); + (*budget)--; + *entry = NEXT_TX(*entry); + } else { + hwbug = 1; + } + } + } else { + tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, + len, flags, mss, vlan); + *entry = NEXT_TX(*entry); + } + + return hwbug; +} + +static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last) +{ + int i; + struct sk_buff *skb; + struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry]; + + skb = txb->skb; + txb->skb = NULL; + + pci_unmap_single(tnapi->tp->pdev, + dma_unmap_addr(txb, mapping), + skb_headlen(skb), + PCI_DMA_TODEVICE); + + while (txb->fragmented) { + txb->fragmented = false; + entry = NEXT_TX(entry); + txb = &tnapi->tx_buffers[entry]; + } + + for (i = 0; i < last; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + entry = NEXT_TX(entry); + txb = &tnapi->tx_buffers[entry]; + + pci_unmap_page(tnapi->tp->pdev, + dma_unmap_addr(txb, mapping), + skb_frag_size(frag), PCI_DMA_TODEVICE); + + while (txb->fragmented) { + txb->fragmented = false; + entry = NEXT_TX(entry); + txb = &tnapi->tx_buffers[entry]; + } + } +} + +/* Workaround 4GB and 40-bit hardware DMA bugs. */ +static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, - struct sk_buff *skb, ++ struct sk_buff **pskb, + u32 *entry, u32 *budget, + u32 base_flags, u32 mss, u32 vlan) +{ + struct tg3 *tp = tnapi->tp; - struct sk_buff *new_skb; ++ struct sk_buff *new_skb, *skb = *pskb; + dma_addr_t new_addr = 0; + int ret = 0; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) + new_skb = skb_copy(skb, GFP_ATOMIC); + else { + int more_headroom = 4 - ((unsigned long)skb->data & 3); + + new_skb = skb_copy_expand(skb, + skb_headroom(skb) + more_headroom, + skb_tailroom(skb), GFP_ATOMIC); + } + + if (!new_skb) { + ret = -1; + } else { + /* New SKB is guaranteed to be linear. 
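+ * Both skb_copy() and skb_copy_expand() allocate a single
+ * contiguous data area, so one mapping covers the whole frame.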
*/ + new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len, + PCI_DMA_TODEVICE); + /* Make sure the mapping succeeded */ + if (pci_dma_mapping_error(tp->pdev, new_addr)) { + dev_kfree_skb(new_skb); + ret = -1; + } else { + base_flags |= TXD_FLAG_END; + + tnapi->tx_buffers[*entry].skb = new_skb; + dma_unmap_addr_set(&tnapi->tx_buffers[*entry], + mapping, new_addr); + + if (tg3_tx_frag_set(tnapi, entry, budget, new_addr, + new_skb->len, base_flags, + mss, vlan)) { + tg3_tx_skb_unmap(tnapi, *entry, 0); + dev_kfree_skb(new_skb); + ret = -1; + } + } + } + + dev_kfree_skb(skb); - ++ *pskb = new_skb; + return ret; +} + +static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *); + +/* Use GSO to workaround a rare TSO bug that may be triggered when the + * TSO header is greater than 80 bytes. + */ +static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb) +{ + struct sk_buff *segs, *nskb; + u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3; + + /* Estimate the number of fragments in the worst case */ + if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) { + netif_stop_queue(tp->dev); + + /* netif_tx_stop_queue() must be done before checking + * checking tx index in tg3_tx_avail() below, because in + * tg3_tx(), we update tx index before checking for + * netif_tx_queue_stopped(). + */ + smp_mb(); + if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est) + return NETDEV_TX_BUSY; + + netif_wake_queue(tp->dev); + } + + segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO); + if (IS_ERR(segs)) + goto tg3_tso_bug_end; + + do { + nskb = segs; + segs = segs->next; + nskb->next = NULL; + tg3_start_xmit(nskb, tp->dev); + } while (segs); + +tg3_tso_bug_end: + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} + +/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and + * support TG3_FLAG_HW_TSO_1 or firmware TSO only. + */ +static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct tg3 *tp = netdev_priv(dev); + u32 len, entry, base_flags, mss, vlan = 0; + u32 budget; + int i = -1, would_hit_hwbug; + dma_addr_t mapping; + struct tg3_napi *tnapi; + struct netdev_queue *txq; + unsigned int last; + + txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); + tnapi = &tp->napi[skb_get_queue_mapping(skb)]; + if (tg3_flag(tp, ENABLE_TSS)) + tnapi++; + + budget = tg3_tx_avail(tnapi); + + /* We are running in BH disabled context with netif_tx_lock + * and TX reclaim runs via tp->napi.poll inside of a software + * interrupt. Furthermore, IRQ processing runs lockless so we have + * no IRQ context deadlocks to worry about either. Rejoice! + */ + if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) { + if (!netif_tx_queue_stopped(txq)) { + netif_tx_stop_queue(txq); + + /* This is a hard error, log it. */ + netdev_err(dev, + "BUG! 
Tx Ring full when queue awake!\n"); + } + return NETDEV_TX_BUSY; + } + + entry = tnapi->tx_prod; + base_flags = 0; + if (skb->ip_summed == CHECKSUM_PARTIAL) + base_flags |= TXD_FLAG_TCPUDP_CSUM; + + mss = skb_shinfo(skb)->gso_size; + if (mss) { + struct iphdr *iph; + u32 tcp_opt_len, hdr_len; + + if (skb_header_cloned(skb) && + pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { + dev_kfree_skb(skb); + goto out_unlock; + } + + iph = ip_hdr(skb); + tcp_opt_len = tcp_optlen(skb); + + if (skb_is_gso_v6(skb)) { + hdr_len = skb_headlen(skb) - ETH_HLEN; + } else { + u32 ip_tcp_len; + + ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr); + hdr_len = ip_tcp_len + tcp_opt_len; + + iph->check = 0; + iph->tot_len = htons(mss + hdr_len); + } + + if (unlikely((ETH_HLEN + hdr_len) > 80) && + tg3_flag(tp, TSO_BUG)) + return tg3_tso_bug(tp, skb); + + base_flags |= (TXD_FLAG_CPU_PRE_DMA | + TXD_FLAG_CPU_POST_DMA); + + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) { + tcp_hdr(skb)->check = 0; + base_flags &= ~TXD_FLAG_TCPUDP_CSUM; + } else + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + + if (tg3_flag(tp, HW_TSO_3)) { + mss |= (hdr_len & 0xc) << 12; + if (hdr_len & 0x10) + base_flags |= 0x00000010; + base_flags |= (hdr_len & 0x3e0) << 5; + } else if (tg3_flag(tp, HW_TSO_2)) + mss |= hdr_len << 9; + else if (tg3_flag(tp, HW_TSO_1) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + if (tcp_opt_len || iph->ihl > 5) { + int tsflags; + + tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); + mss |= (tsflags << 11); + } + } else { + if (tcp_opt_len || iph->ihl > 5) { + int tsflags; + + tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); + base_flags |= tsflags << 12; + } + } + } + + if (tg3_flag(tp, USE_JUMBO_BDFLAG) && + !mss && skb->len > VLAN_ETH_FRAME_LEN) + base_flags |= TXD_FLAG_JMB_PKT; + + if (vlan_tx_tag_present(skb)) { + base_flags |= TXD_FLAG_VLAN; + vlan = vlan_tx_tag_get(skb); + } + + len = skb_headlen(skb); + + mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(tp->pdev, mapping)) { + dev_kfree_skb(skb); + goto out_unlock; + } + + tnapi->tx_buffers[entry].skb = skb; + dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); + + would_hit_hwbug = 0; + + if (tg3_flag(tp, 5701_DMA_BUG)) + would_hit_hwbug = 1; + + if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags | + ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0), + mss, vlan)) + would_hit_hwbug = 1; + + /* Now loop through additional data fragments, and queue them. */ + if (skb_shinfo(skb)->nr_frags > 0) { + u32 tmp_mss = mss; + + if (!tg3_flag(tp, HW_TSO_1) && + !tg3_flag(tp, HW_TSO_2) && + !tg3_flag(tp, HW_TSO_3)) + tmp_mss = 0; + + last = skb_shinfo(skb)->nr_frags - 1; + for (i = 0; i <= last; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + len = skb_frag_size(frag); + mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0, + len, DMA_TO_DEVICE); + + tnapi->tx_buffers[entry].skb = NULL; + dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, + mapping); + if (dma_mapping_error(&tp->pdev->dev, mapping)) + goto dma_error; + + if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, + len, base_flags | + ((i == last) ? TXD_FLAG_END : 0), + tmp_mss, vlan)) + would_hit_hwbug = 1; + } + } + + if (would_hit_hwbug) { + tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); + + /* If the workaround fails due to memory/mapping + * failure, silently drop this packet. 
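+ * tigon3_dma_hwbug_workaround() frees the original skb whether it
+ * succeeds or fails, so no extra cleanup is needed on this path.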
+ */ + entry = tnapi->tx_prod; + budget = tg3_tx_avail(tnapi); - if (tigon3_dma_hwbug_workaround(tnapi, skb, &entry, &budget, ++ if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget, + base_flags, mss, vlan)) + goto out_unlock; + } + + skb_tx_timestamp(skb); + + /* Packets are ready, update Tx producer idx local and on card. */ + tw32_tx_mbox(tnapi->prodmbox, entry); + + tnapi->tx_prod = entry; + if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { + netif_tx_stop_queue(txq); + + /* netif_tx_stop_queue() must be done before checking + * checking tx index in tg3_tx_avail() below, because in + * tg3_tx(), we update tx index before checking for + * netif_tx_queue_stopped(). + */ + smp_mb(); + if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) + netif_tx_wake_queue(txq); + } + +out_unlock: + mmiowb(); + + return NETDEV_TX_OK; + +dma_error: + tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); + dev_kfree_skb(skb); + tnapi->tx_buffers[tnapi->tx_prod].skb = NULL; + return NETDEV_TX_OK; +} + +static void tg3_mac_loopback(struct tg3 *tp, bool enable) +{ + if (enable) { + tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX | + MAC_MODE_PORT_MODE_MASK); + + tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK; + + if (!tg3_flag(tp, 5705_PLUS)) + tp->mac_mode |= MAC_MODE_LINK_POLARITY; + + if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) + tp->mac_mode |= MAC_MODE_PORT_MODE_MII; + else + tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; + } else { + tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK; + + if (tg3_flag(tp, 5705_PLUS) || + (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) + tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; + } + + tw32(MAC_MODE, tp->mac_mode); + udelay(40); +} + +static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk) +{ + u32 val, bmcr, mac_mode, ptest = 0; + + tg3_phy_toggle_apd(tp, false); + tg3_phy_toggle_automdix(tp, 0); + + if (extlpbk && tg3_phy_set_extloopbk(tp)) + return -EIO; + + bmcr = BMCR_FULLDPLX; + switch (speed) { + case SPEED_10: + break; + case SPEED_100: + bmcr |= BMCR_SPEED100; + break; + case SPEED_1000: + default: + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { + speed = SPEED_100; + bmcr |= BMCR_SPEED100; + } else { + speed = SPEED_1000; + bmcr |= BMCR_SPEED1000; + } + } + + if (extlpbk) { + if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) { + tg3_readphy(tp, MII_CTRL1000, &val); + val |= CTL1000_AS_MASTER | + CTL1000_ENABLE_MASTER; + tg3_writephy(tp, MII_CTRL1000, val); + } else { + ptest = MII_TG3_FET_PTEST_TRIM_SEL | + MII_TG3_FET_PTEST_TRIM_2; + tg3_writephy(tp, MII_TG3_FET_PTEST, ptest); + } + } else + bmcr |= BMCR_LOOPBACK; + + tg3_writephy(tp, MII_BMCR, bmcr); + + /* The write needs to be flushed for the FETs */ + if (tp->phy_flags & TG3_PHYFLG_IS_FET) + tg3_readphy(tp, MII_BMCR, &bmcr); + + udelay(40); + + if ((tp->phy_flags & TG3_PHYFLG_IS_FET) && + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { + tg3_writephy(tp, MII_TG3_FET_PTEST, ptest | + MII_TG3_FET_PTEST_FRC_TX_LINK | + MII_TG3_FET_PTEST_FRC_TX_LOCK); + + /* The write needs to be flushed for the AC131 */ + tg3_readphy(tp, MII_TG3_FET_PTEST, &val); + } + + /* Reset to prevent losing 1st rx packet intermittently */ + if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && + tg3_flag(tp, 5780_CLASS)) { + tw32_f(MAC_RX_MODE, RX_MODE_RESET); + udelay(10); + tw32_f(MAC_RX_MODE, tp->rx_mode); + } + + mac_mode = tp->mac_mode & + ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); + if (speed == SPEED_1000) + mac_mode |= MAC_MODE_PORT_MODE_GMII; + else + mac_mode |= MAC_MODE_PORT_MODE_MII; + 
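+ /* On a 5700, the MAC link polarity must match the attached PHY. */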
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { + u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK; + + if (masked_phy_id == TG3_PHY_ID_BCM5401) + mac_mode &= ~MAC_MODE_LINK_POLARITY; + else if (masked_phy_id == TG3_PHY_ID_BCM5411) + mac_mode |= MAC_MODE_LINK_POLARITY; + + tg3_writephy(tp, MII_TG3_EXT_CTRL, + MII_TG3_EXT_CTRL_LNK3_LED_MODE); + } + + tw32(MAC_MODE, mac_mode); + udelay(40); + + return 0; +} + +static void tg3_set_loopback(struct net_device *dev, u32 features) +{ + struct tg3 *tp = netdev_priv(dev); + + if (features & NETIF_F_LOOPBACK) { + if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK) + return; + + spin_lock_bh(&tp->lock); + tg3_mac_loopback(tp, true); + netif_carrier_on(tp->dev); + spin_unlock_bh(&tp->lock); + netdev_info(dev, "Internal MAC loopback mode enabled.\n"); + } else { + if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) + return; + + spin_lock_bh(&tp->lock); + tg3_mac_loopback(tp, false); + /* Force link status check */ + tg3_setup_phy(tp, 1); + spin_unlock_bh(&tp->lock); + netdev_info(dev, "Internal MAC loopback mode disabled.\n"); + } +} + +static u32 tg3_fix_features(struct net_device *dev, u32 features) +{ + struct tg3 *tp = netdev_priv(dev); + + if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS)) + features &= ~NETIF_F_ALL_TSO; + + return features; +} + +static int tg3_set_features(struct net_device *dev, u32 features) +{ + u32 changed = dev->features ^ features; + + if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) + tg3_set_loopback(dev, features); + + return 0; +} + +static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, + int new_mtu) +{ + dev->mtu = new_mtu; + + if (new_mtu > ETH_DATA_LEN) { + if (tg3_flag(tp, 5780_CLASS)) { + netdev_update_features(dev); + tg3_flag_clear(tp, TSO_CAPABLE); + } else { + tg3_flag_set(tp, JUMBO_RING_ENABLE); + } + } else { + if (tg3_flag(tp, 5780_CLASS)) { + tg3_flag_set(tp, TSO_CAPABLE); + netdev_update_features(dev); + } + tg3_flag_clear(tp, JUMBO_RING_ENABLE); + } +} + +static int tg3_change_mtu(struct net_device *dev, int new_mtu) +{ + struct tg3 *tp = netdev_priv(dev); + int err; + + if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp)) + return -EINVAL; + + if (!netif_running(dev)) { + /* We'll just catch it later when the + * device is up'd. + */ + tg3_set_mtu(dev, tp, new_mtu); + return 0; + } + + tg3_phy_stop(tp); + + tg3_netif_stop(tp); + + tg3_full_lock(tp, 1); + + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + + tg3_set_mtu(dev, tp, new_mtu); + + err = tg3_restart_hw(tp, 0); + + if (!err) + tg3_netif_start(tp); + + tg3_full_unlock(tp); + + if (!err) + tg3_phy_start(tp); + + return err; +} + +static void tg3_rx_prodring_free(struct tg3 *tp, + struct tg3_rx_prodring_set *tpr) +{ + int i; + + if (tpr != &tp->napi[0].prodring) { + for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx; + i = (i + 1) & tp->rx_std_ring_mask) + tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], + tp->rx_pkt_map_sz); + + if (tg3_flag(tp, JUMBO_CAPABLE)) { + for (i = tpr->rx_jmb_cons_idx; + i != tpr->rx_jmb_prod_idx; + i = (i + 1) & tp->rx_jmb_ring_mask) { + tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], + TG3_RX_JMB_MAP_SZ); + } + } + + return; + } + + for (i = 0; i <= tp->rx_std_ring_mask; i++) + tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], + tp->rx_pkt_map_sz); + + if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { + for (i = 0; i <= tp->rx_jmb_ring_mask; i++) + tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], + TG3_RX_JMB_MAP_SZ); + } +} + +/* Initialize rx rings for packet processing. 
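+ * Both the standard and, when enabled, the jumbo producer rings are
+ * refilled with freshly allocated SKBs.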
+ * + * The chip has been shut down and the driver detached from + * the networking, so no interrupts or new tx packets will + * end up in the driver. tp->{tx,}lock are held and thus + * we may not sleep. + */ +static int tg3_rx_prodring_alloc(struct tg3 *tp, + struct tg3_rx_prodring_set *tpr) +{ + u32 i, rx_pkt_dma_sz; + + tpr->rx_std_cons_idx = 0; + tpr->rx_std_prod_idx = 0; + tpr->rx_jmb_cons_idx = 0; + tpr->rx_jmb_prod_idx = 0; + + if (tpr != &tp->napi[0].prodring) { + memset(&tpr->rx_std_buffers[0], 0, + TG3_RX_STD_BUFF_RING_SIZE(tp)); + if (tpr->rx_jmb_buffers) + memset(&tpr->rx_jmb_buffers[0], 0, + TG3_RX_JMB_BUFF_RING_SIZE(tp)); + goto done; + } + + /* Zero out all descriptors. */ + memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp)); + + rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ; + if (tg3_flag(tp, 5780_CLASS) && + tp->dev->mtu > ETH_DATA_LEN) + rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ; + tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz); + + /* Initialize invariants of the rings, we only set this + * stuff once. This works because the card does not + * write into the rx buffer posting rings. + */ + for (i = 0; i <= tp->rx_std_ring_mask; i++) { + struct tg3_rx_buffer_desc *rxd; + + rxd = &tpr->rx_std[i]; + rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT; + rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT); + rxd->opaque = (RXD_OPAQUE_RING_STD | + (i << RXD_OPAQUE_INDEX_SHIFT)); + } + + /* Now allocate fresh SKBs for each rx ring. */ + for (i = 0; i < tp->rx_pending; i++) { + if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) { + netdev_warn(tp->dev, + "Using a smaller RX standard ring. Only " + "%d out of %d buffers were allocated " + "successfully\n", i, tp->rx_pending); + if (i == 0) + goto initfail; + tp->rx_pending = i; + break; + } + } + + if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) + goto done; + + memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp)); + + if (!tg3_flag(tp, JUMBO_RING_ENABLE)) + goto done; + + for (i = 0; i <= tp->rx_jmb_ring_mask; i++) { + struct tg3_rx_buffer_desc *rxd; + + rxd = &tpr->rx_jmb[i].std; + rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT; + rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) | + RXD_FLAG_JUMBO; + rxd->opaque = (RXD_OPAQUE_RING_JUMBO | + (i << RXD_OPAQUE_INDEX_SHIFT)); + } + + for (i = 0; i < tp->rx_jumbo_pending; i++) { + if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) { + netdev_warn(tp->dev, + "Using a smaller RX jumbo ring. 
Only %d " + "out of %d buffers were allocated " + "successfully\n", i, tp->rx_jumbo_pending); + if (i == 0) + goto initfail; + tp->rx_jumbo_pending = i; + break; + } + } + +done: + return 0; + +initfail: + tg3_rx_prodring_free(tp, tpr); + return -ENOMEM; +} + +static void tg3_rx_prodring_fini(struct tg3 *tp, + struct tg3_rx_prodring_set *tpr) +{ + kfree(tpr->rx_std_buffers); + tpr->rx_std_buffers = NULL; + kfree(tpr->rx_jmb_buffers); + tpr->rx_jmb_buffers = NULL; + if (tpr->rx_std) { + dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp), + tpr->rx_std, tpr->rx_std_mapping); + tpr->rx_std = NULL; + } + if (tpr->rx_jmb) { + dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp), + tpr->rx_jmb, tpr->rx_jmb_mapping); + tpr->rx_jmb = NULL; + } +} + +static int tg3_rx_prodring_init(struct tg3 *tp, + struct tg3_rx_prodring_set *tpr) +{ + tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp), + GFP_KERNEL); + if (!tpr->rx_std_buffers) + return -ENOMEM; + + tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev, + TG3_RX_STD_RING_BYTES(tp), + &tpr->rx_std_mapping, + GFP_KERNEL); + if (!tpr->rx_std) + goto err_out; + + if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { + tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp), + GFP_KERNEL); + if (!tpr->rx_jmb_buffers) + goto err_out; + + tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev, + TG3_RX_JMB_RING_BYTES(tp), + &tpr->rx_jmb_mapping, + GFP_KERNEL); + if (!tpr->rx_jmb) + goto err_out; + } + + return 0; + +err_out: + tg3_rx_prodring_fini(tp, tpr); + return -ENOMEM; +} + +/* Free up pending packets in all rx/tx rings. + * + * The chip has been shut down and the driver detached from + * the networking, so no interrupts or new tx packets will + * end up in the driver. tp->{tx,}lock is not held and we are not + * in an interrupt context and thus may sleep. + */ +static void tg3_free_rings(struct tg3 *tp) +{ + int i, j; + + for (j = 0; j < tp->irq_cnt; j++) { + struct tg3_napi *tnapi = &tp->napi[j]; + + tg3_rx_prodring_free(tp, &tnapi->prodring); + + if (!tnapi->tx_buffers) + continue; + + for (i = 0; i < TG3_TX_RING_SIZE; i++) { + struct sk_buff *skb = tnapi->tx_buffers[i].skb; + + if (!skb) + continue; + + tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags); + + dev_kfree_skb_any(skb); + } + } +} + +/* Initialize tx/rx rings for packet processing. + * + * The chip has been shut down and the driver detached from + * the networking, so no interrupts or new tx packets will + * end up in the driver. tp->{tx,}lock are held and thus + * we may not sleep. + */ +static int tg3_init_rings(struct tg3 *tp) +{ + int i; + + /* Free up all the SKBs. */ + tg3_free_rings(tp); + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + tnapi->last_tag = 0; + tnapi->last_irq_tag = 0; + tnapi->hw_status->status = 0; + tnapi->hw_status->status_tag = 0; + memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); + + tnapi->tx_prod = 0; + tnapi->tx_cons = 0; + if (tnapi->tx_ring) + memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES); + + tnapi->rx_rcb_ptr = 0; + if (tnapi->rx_rcb) + memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); + + if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) { + tg3_free_rings(tp); + return -ENOMEM; + } + } + + return 0; +} + +/* + * Must not be invoked with interrupt sources disabled and + * the hardware shutdown down. 
+ */ +static void tg3_free_consistent(struct tg3 *tp) +{ + int i; + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + if (tnapi->tx_ring) { + dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES, + tnapi->tx_ring, tnapi->tx_desc_mapping); + tnapi->tx_ring = NULL; + } + + kfree(tnapi->tx_buffers); + tnapi->tx_buffers = NULL; + + if (tnapi->rx_rcb) { + dma_free_coherent(&tp->pdev->dev, + TG3_RX_RCB_RING_BYTES(tp), + tnapi->rx_rcb, + tnapi->rx_rcb_mapping); + tnapi->rx_rcb = NULL; + } + + tg3_rx_prodring_fini(tp, &tnapi->prodring); + + if (tnapi->hw_status) { + dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE, + tnapi->hw_status, + tnapi->status_mapping); + tnapi->hw_status = NULL; + } + } + + if (tp->hw_stats) { + dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats), + tp->hw_stats, tp->stats_mapping); + tp->hw_stats = NULL; + } +} + +/* + * Must not be invoked with interrupt sources disabled and + * the hardware shutdown down. Can sleep. + */ +static int tg3_alloc_consistent(struct tg3 *tp) +{ + int i; + + tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev, + sizeof(struct tg3_hw_stats), + &tp->stats_mapping, + GFP_KERNEL); + if (!tp->hw_stats) + goto err_out; + + memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + struct tg3_hw_status *sblk; + + tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev, + TG3_HW_STATUS_SIZE, + &tnapi->status_mapping, + GFP_KERNEL); + if (!tnapi->hw_status) + goto err_out; + + memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); + sblk = tnapi->hw_status; + + if (tg3_rx_prodring_init(tp, &tnapi->prodring)) + goto err_out; + + /* If multivector TSS is enabled, vector 0 does not handle + * tx interrupts. Don't allocate any resources for it. + */ + if ((!i && !tg3_flag(tp, ENABLE_TSS)) || + (i && tg3_flag(tp, ENABLE_TSS))) { + tnapi->tx_buffers = kzalloc( + sizeof(struct tg3_tx_ring_info) * + TG3_TX_RING_SIZE, GFP_KERNEL); + if (!tnapi->tx_buffers) + goto err_out; + + tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev, + TG3_TX_RING_BYTES, + &tnapi->tx_desc_mapping, + GFP_KERNEL); + if (!tnapi->tx_ring) + goto err_out; + } + + /* + * When RSS is enabled, the status block format changes + * slightly. The "rx_jumbo_consumer", "reserved", + * and "rx_mini_consumer" members get mapped to the + * other three rx return ring producer indexes. + */ + switch (i) { + default: + tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer; + break; + case 2: + tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer; + break; + case 3: + tnapi->rx_rcb_prod_idx = &sblk->reserved; + break; + case 4: + tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer; + break; + } + + /* + * If multivector RSS is enabled, vector 0 does not handle + * rx or tx interrupts. Don't allocate any resources for it. + */ + if (!i && tg3_flag(tp, ENABLE_RSS)) + continue; + + tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev, + TG3_RX_RCB_RING_BYTES(tp), + &tnapi->rx_rcb_mapping, + GFP_KERNEL); + if (!tnapi->rx_rcb) + goto err_out; + + memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); + } + + return 0; + +err_out: + tg3_free_consistent(tp); + return -ENOMEM; +} + +#define MAX_WAIT_CNT 1000 + +/* To stop a block, clear the enable bit and poll till it + * clears. tp->lock is held. 
+ */ +static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent) +{ + unsigned int i; + u32 val; + + if (tg3_flag(tp, 5705_PLUS)) { + switch (ofs) { + case RCVLSC_MODE: + case DMAC_MODE: + case MBFREE_MODE: + case BUFMGR_MODE: + case MEMARB_MODE: + /* We can't enable/disable these bits of the + * 5705/5750, just say success. + */ + return 0; + + default: + break; + } + } + + val = tr32(ofs); + val &= ~enable_bit; + tw32_f(ofs, val); + + for (i = 0; i < MAX_WAIT_CNT; i++) { + udelay(100); + val = tr32(ofs); + if ((val & enable_bit) == 0) + break; + } + + if (i == MAX_WAIT_CNT && !silent) { + dev_err(&tp->pdev->dev, + "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n", + ofs, enable_bit); + return -ENODEV; + } + + return 0; +} + +/* tp->lock is held. */ +static int tg3_abort_hw(struct tg3 *tp, int silent) +{ + int i, err; + + tg3_disable_ints(tp); + + tp->rx_mode &= ~RX_MODE_ENABLE; + tw32_f(MAC_RX_MODE, tp->rx_mode); + udelay(10); + + err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent); + + err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent); + + tp->mac_mode &= ~MAC_MODE_TDE_ENABLE; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + + tp->tx_mode &= ~TX_MODE_ENABLE; + tw32_f(MAC_TX_MODE, tp->tx_mode); + + for (i = 0; i < MAX_WAIT_CNT; i++) { + udelay(100); + if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE)) + break; + } + if (i >= MAX_WAIT_CNT) { + dev_err(&tp->pdev->dev, + "%s timed out, TX_MODE_ENABLE will not clear " + "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE)); + err |= -ENODEV; + } + + err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent); + + tw32(FTQ_RESET, 0xffffffff); + tw32(FTQ_RESET, 0x00000000); + + err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent); + err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent); + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + if (tnapi->hw_status) + memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); + } + if (tp->hw_stats) + memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats)); + + return err; +} + +/* Save PCI command register before chip reset */ +static void tg3_save_pci_state(struct tg3 *tp) +{ + pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd); +} + +/* Restore PCI state after chip reset */ +static void tg3_restore_pci_state(struct tg3 *tp) +{ + u32 val; + + /* Re-enable indirect register accesses. */ + pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, + tp->misc_host_ctrl); + + /* Set MAX PCI retry to zero. 
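+ * (Note the asymmetry with tg3_save_pci_state() above: only the
+ * PCI_COMMAND word is saved verbatim; every other value written back
+ * here is recomputed from state the driver already caches, so a reset
+ * that wipes config space can be undone field by field with plain
+ * pci_write_config_word()/pci_write_config_dword() calls.)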
*/ + val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE); + if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && + tg3_flag(tp, PCIX_MODE)) + val |= PCISTATE_RETRY_SAME_DMA; + /* Allow reads and writes to the APE register and memory space. */ + if (tg3_flag(tp, ENABLE_APE)) + val |= PCISTATE_ALLOW_APE_CTLSPC_WR | + PCISTATE_ALLOW_APE_SHMEM_WR | + PCISTATE_ALLOW_APE_PSPACE_WR; + pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val); + + pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) { + if (tg3_flag(tp, PCI_EXPRESS)) + pcie_set_readrq(tp->pdev, tp->pcie_readrq); + else { + pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, + tp->pci_cacheline_sz); + pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, + tp->pci_lat_timer); + } + } + + /* Make sure PCI-X relaxed ordering bit is clear. */ + if (tg3_flag(tp, PCIX_MODE)) { + u16 pcix_cmd; + + pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, + &pcix_cmd); + pcix_cmd &= ~PCI_X_CMD_ERO; + pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, + pcix_cmd); + } + + if (tg3_flag(tp, 5780_CLASS)) { + + /* Chip reset on 5780 will reset MSI enable bit, + * so need to restore it. + */ + if (tg3_flag(tp, USING_MSI)) { + u16 ctrl; + + pci_read_config_word(tp->pdev, + tp->msi_cap + PCI_MSI_FLAGS, + &ctrl); + pci_write_config_word(tp->pdev, + tp->msi_cap + PCI_MSI_FLAGS, + ctrl | PCI_MSI_FLAGS_ENABLE); + val = tr32(MSGINT_MODE); + tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE); + } + } +} + +/* tp->lock is held. */ +static int tg3_chip_reset(struct tg3 *tp) +{ + u32 val; + void (*write_op)(struct tg3 *, u32, u32); + int i, err; + + tg3_nvram_lock(tp); + + tg3_ape_lock(tp, TG3_APE_LOCK_GRC); + + /* No matching tg3_nvram_unlock() after this because + * chip reset below will undo the nvram lock. + */ + tp->nvram_lock_cnt = 0; + + /* GRC_MISC_CFG core clock reset will clear the memory + * enable bit in PCI register 4 and the MSI enable bit + * on some chips, so we save relevant registers here. + */ + tg3_save_pci_state(tp); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || + tg3_flag(tp, 5755_PLUS)) + tw32(GRC_FASTBOOT_PC, 0); + + /* + * We must avoid the readl() that normally takes place. + * It locks machines, causes machine checks, and other + * fun things. So, temporarily disable the 5701 + * hardware workaround, while we do the reset. + */ + write_op = tp->write32; + if (write_op == tg3_write_flush_reg32) + tp->write32 = tg3_write32; + + /* Prevent the irq handler from reading or writing PCI registers + * during chip reset when the memory enable bit in the PCI command + * register may be cleared. The chip does not generate interrupt + * at this time, but the irq handler may still be called due to irq + * sharing or irqpoll. 
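+ *
+ * The sequence below (set CHIP_RESETTING, smp_mb() so the store is
+ * visible before we wait, then synchronize_irq() on every vector) is
+ * the usual way to quiesce handlers without free_irq(): once
+ * synchronize_irq() returns, any handler instance that missed the
+ * flag has finished running and can no longer touch config space.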
+ */ + tg3_flag_set(tp, CHIP_RESETTING); + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + if (tnapi->hw_status) { + tnapi->hw_status->status = 0; + tnapi->hw_status->status_tag = 0; + } + tnapi->last_tag = 0; + tnapi->last_irq_tag = 0; + } + smp_mb(); + + for (i = 0; i < tp->irq_cnt; i++) + synchronize_irq(tp->napi[i].irq_vec); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { + val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; + tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); + } + + /* do the reset */ + val = GRC_MISC_CFG_CORECLK_RESET; + + if (tg3_flag(tp, PCI_EXPRESS)) { + /* Force PCIe 1.0a mode */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && + !tg3_flag(tp, 57765_PLUS) && + tr32(TG3_PCIE_PHY_TSTCTL) == + (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM)) + tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM); + + if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) { + tw32(GRC_MISC_CFG, (1 << 29)); + val |= (1 << 29); + } + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET); + tw32(GRC_VCPU_EXT_CTRL, + tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU); + } + + /* Manage gphy power for all CPMU absent PCIe devices. */ + if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT)) + val |= GRC_MISC_CFG_KEEP_GPHY_POWER; + + tw32(GRC_MISC_CFG, val); + + /* restore 5701 hardware bug workaround write method */ + tp->write32 = write_op; + + /* Unfortunately, we have to delay before the PCI read back. + * Some 575X chips even will not respond to a PCI cfg access + * when the reset command is given to the chip. + * + * How do these hardware designers expect things to work + * properly if the PCI write is posted for a long period + * of time? It is always necessary to have some method by + * which a register read back can occur to push the write + * out which does the reset. + * + * For most tg3 variants the trick below was working. + * Ho hum... + */ + udelay(120); + + /* Flush PCI posted writes. The normal MMIO registers + * are inaccessible at this time so this is the only + * way to make this reliably (actually, this is no longer + * the case, see above). I tried to use indirect + * register read/write but this upset some 5701 variants. + */ + pci_read_config_dword(tp->pdev, PCI_COMMAND, &val); + + udelay(120); + + if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) { + u16 val16; + + if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) { + int i; + u32 cfg_val; + + /* Wait for link training to complete. */ + for (i = 0; i < 5000; i++) + udelay(100); + + pci_read_config_dword(tp->pdev, 0xc4, &cfg_val); + pci_write_config_dword(tp->pdev, 0xc4, + cfg_val | (1 << 15)); + } + + /* Clear the "no snoop" and "relaxed ordering" bits. */ + pci_read_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL, + &val16); + val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN | + PCI_EXP_DEVCTL_NOSNOOP_EN); + /* + * Older PCIe devices only support the 128 byte + * MPS setting. Enforce the restriction. 
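+ * (PCI_EXP_DEVCTL_PAYLOAD is the 3-bit max-payload-size field of the
+ * PCIe Device Control register, encoded as 128 << value; clearing it
+ * below therefore forces the smallest legal MPS, 128 << 0 = 128
+ * bytes.)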
+ */ + if (!tg3_flag(tp, CPMU_PRESENT)) + val16 &= ~PCI_EXP_DEVCTL_PAYLOAD; + pci_write_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL, + val16); + + pcie_set_readrq(tp->pdev, tp->pcie_readrq); + + /* Clear error status */ + pci_write_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA, + PCI_EXP_DEVSTA_CED | + PCI_EXP_DEVSTA_NFED | + PCI_EXP_DEVSTA_FED | + PCI_EXP_DEVSTA_URD); + } + + tg3_restore_pci_state(tp); + + tg3_flag_clear(tp, CHIP_RESETTING); + tg3_flag_clear(tp, ERROR_PROCESSED); + + val = 0; + if (tg3_flag(tp, 5780_CLASS)) + val = tr32(MEMARB_MODE); + tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); + + if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) { + tg3_stop_fw(tp); + tw32(0x5000, 0x400); + } + + tw32(GRC_MODE, tp->grc_mode); + + if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) { + val = tr32(0xc4); + + tw32(0xc4, val | (1 << 15)); + } + + if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 && + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE; + if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) + tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN; + tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); + } + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { + tp->mac_mode = MAC_MODE_PORT_MODE_TBI; + val = tp->mac_mode; + } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { + tp->mac_mode = MAC_MODE_PORT_MODE_GMII; + val = tp->mac_mode; + } else + val = 0; + + tw32_f(MAC_MODE, val); + udelay(40); + + tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); + + err = tg3_poll_fw(tp); + if (err) + return err; + + tg3_mdio_start(tp); + + if (tg3_flag(tp, PCI_EXPRESS) && + tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && + !tg3_flag(tp, 57765_PLUS)) { + val = tr32(0x7c00); + + tw32(0x7c00, val | (1 << 25)); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + val = tr32(TG3_CPMU_CLCK_ORIDE); + tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN); + } + + /* Reprobe ASF enable state. */ + tg3_flag_clear(tp, ENABLE_ASF); + tg3_flag_clear(tp, ASF_NEW_HANDSHAKE); + tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); + if (val == NIC_SRAM_DATA_SIG_MAGIC) { + u32 nic_cfg; + + tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); + if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { + tg3_flag_set(tp, ENABLE_ASF); + tp->last_event_jiffies = jiffies; + if (tg3_flag(tp, 5750_PLUS)) + tg3_flag_set(tp, ASF_NEW_HANDSHAKE); + } + } + + return 0; +} + +/* tp->lock is held. */ +static int tg3_halt(struct tg3 *tp, int kind, int silent) +{ + int err; + + tg3_stop_fw(tp); + + tg3_write_sig_pre_reset(tp, kind); + + tg3_abort_hw(tp, silent); + err = tg3_chip_reset(tp); + + __tg3_set_mac_addr(tp, 0); + + tg3_write_sig_legacy(tp, kind); + tg3_write_sig_post_reset(tp, kind); + + if (err) + return err; + + return 0; +} + +static int tg3_set_mac_addr(struct net_device *dev, void *p) +{ + struct tg3 *tp = netdev_priv(dev); + struct sockaddr *addr = p; + int err = 0, skip_mac_1 = 0; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + if (!netif_running(dev)) + return 0; + + if (tg3_flag(tp, ENABLE_ASF)) { + u32 addr0_high, addr0_low, addr1_high, addr1_low; + + addr0_high = tr32(MAC_ADDR_0_HIGH); + addr0_low = tr32(MAC_ADDR_0_LOW); + addr1_high = tr32(MAC_ADDR_1_HIGH); + addr1_low = tr32(MAC_ADDR_1_LOW); + + /* Skip MAC addr 1 if ASF is using it. 
*/ + if ((addr0_high != addr1_high || addr0_low != addr1_low) && + !(addr1_high == 0 && addr1_low == 0)) + skip_mac_1 = 1; + } + spin_lock_bh(&tp->lock); + __tg3_set_mac_addr(tp, skip_mac_1); + spin_unlock_bh(&tp->lock); + + return err; +} + +/* tp->lock is held. */ +static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr, + dma_addr_t mapping, u32 maxlen_flags, + u32 nic_addr) +{ + tg3_write_mem(tp, + (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH), + ((u64) mapping >> 32)); + tg3_write_mem(tp, + (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW), + ((u64) mapping & 0xffffffff)); + tg3_write_mem(tp, + (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS), + maxlen_flags); + + if (!tg3_flag(tp, 5705_PLUS)) + tg3_write_mem(tp, + (bdinfo_addr + TG3_BDINFO_NIC_ADDR), + nic_addr); +} + +static void __tg3_set_rx_mode(struct net_device *); +static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) +{ + int i; + + if (!tg3_flag(tp, ENABLE_TSS)) { + tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); + tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); + tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); + } else { + tw32(HOSTCC_TXCOL_TICKS, 0); + tw32(HOSTCC_TXMAX_FRAMES, 0); + tw32(HOSTCC_TXCOAL_MAXF_INT, 0); + } + + if (!tg3_flag(tp, ENABLE_RSS)) { + tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); + tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); + tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); + } else { + tw32(HOSTCC_RXCOL_TICKS, 0); + tw32(HOSTCC_RXMAX_FRAMES, 0); + tw32(HOSTCC_RXCOAL_MAXF_INT, 0); + } + + if (!tg3_flag(tp, 5705_PLUS)) { + u32 val = ec->stats_block_coalesce_usecs; + + tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq); + tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq); + + if (!netif_carrier_ok(tp->dev)) + val = 0; + + tw32(HOSTCC_STAT_COAL_TICKS, val); + } + + for (i = 0; i < tp->irq_cnt - 1; i++) { + u32 reg; + + reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18; + tw32(reg, ec->rx_coalesce_usecs); + reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18; + tw32(reg, ec->rx_max_coalesced_frames); + reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18; + tw32(reg, ec->rx_max_coalesced_frames_irq); + + if (tg3_flag(tp, ENABLE_TSS)) { + reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18; + tw32(reg, ec->tx_coalesce_usecs); + reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18; + tw32(reg, ec->tx_max_coalesced_frames); + reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18; + tw32(reg, ec->tx_max_coalesced_frames_irq); + } + } + + for (; i < tp->irq_max - 1; i++) { + tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0); + tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0); + tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); + + if (tg3_flag(tp, ENABLE_TSS)) { + tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0); + tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0); + tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); + } + } +} + +/* tp->lock is held. */ +static void tg3_rings_reset(struct tg3 *tp) +{ + int i; + u32 stblk, txrcb, rxrcb, limit; + struct tg3_napi *tnapi = &tp->napi[0]; + + /* Disable all transmit rings but the first. 
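+ *
+ * Each ring is described by a TG3_BDINFO_SIZE control block in NIC
+ * SRAM holding the host DMA address (hi/lo), a maxlen/flags word and
+ * a NIC-side address (see tg3_set_bdinfo() above), so disabling a
+ * ring is just one SRAM write per block:
+ *
+ *	for (rcb = base + TG3_BDINFO_SIZE; rcb < limit;
+ *	     rcb += TG3_BDINFO_SIZE)
+ *		tg3_write_mem(tp, rcb + TG3_BDINFO_MAXLEN_FLAGS,
+ *			      BDINFO_FLAGS_DISABLED);
+ *
+ * with "base"/"limit" standing in for the txrcb/rxrcb bounds chosen
+ * per chip below.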
*/ + if (!tg3_flag(tp, 5705_PLUS)) + limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; + else if (tg3_flag(tp, 5717_PLUS)) + limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4; + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; + else + limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; + + for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; + txrcb < limit; txrcb += TG3_BDINFO_SIZE) + tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS, + BDINFO_FLAGS_DISABLED); + + + /* Disable all receive return rings but the first. */ + if (tg3_flag(tp, 5717_PLUS)) + limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; + else if (!tg3_flag(tp, 5705_PLUS)) + limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4; + else + limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; + + for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; + rxrcb < limit; rxrcb += TG3_BDINFO_SIZE) + tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS, + BDINFO_FLAGS_DISABLED); + + /* Disable interrupts */ + tw32_mailbox_f(tp->napi[0].int_mbox, 1); + tp->napi[0].chk_msi_cnt = 0; + tp->napi[0].last_rx_cons = 0; + tp->napi[0].last_tx_cons = 0; + + /* Zero mailbox registers. */ + if (tg3_flag(tp, SUPPORT_MSIX)) { + for (i = 1; i < tp->irq_max; i++) { + tp->napi[i].tx_prod = 0; + tp->napi[i].tx_cons = 0; + if (tg3_flag(tp, ENABLE_TSS)) + tw32_mailbox(tp->napi[i].prodmbox, 0); + tw32_rx_mbox(tp->napi[i].consmbox, 0); + tw32_mailbox_f(tp->napi[i].int_mbox, 1); + tp->napi[i].chk_msi_cnt = 0; + tp->napi[i].last_rx_cons = 0; + tp->napi[i].last_tx_cons = 0; + } + if (!tg3_flag(tp, ENABLE_TSS)) + tw32_mailbox(tp->napi[0].prodmbox, 0); + } else { + tp->napi[0].tx_prod = 0; + tp->napi[0].tx_cons = 0; + tw32_mailbox(tp->napi[0].prodmbox, 0); + tw32_rx_mbox(tp->napi[0].consmbox, 0); + } + + /* Make sure the NIC-based send BD rings are disabled. */ + if (!tg3_flag(tp, 5705_PLUS)) { + u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW; + for (i = 0; i < 16; i++) + tw32_tx_mbox(mbox + i * 8, 0); + } + + txrcb = NIC_SRAM_SEND_RCB; + rxrcb = NIC_SRAM_RCV_RET_RCB; + + /* Clear status block in ram. */ + memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); + + /* Set status block DMA address */ + tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, + ((u64) tnapi->status_mapping >> 32)); + tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, + ((u64) tnapi->status_mapping & 0xffffffff)); + + if (tnapi->tx_ring) { + tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, + (TG3_TX_RING_SIZE << + BDINFO_FLAGS_MAXLEN_SHIFT), + NIC_SRAM_TX_BUFFER_DESC); + txrcb += TG3_BDINFO_SIZE; + } + + if (tnapi->rx_rcb) { + tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, + (tp->rx_ret_ring_mask + 1) << + BDINFO_FLAGS_MAXLEN_SHIFT, 0); + rxrcb += TG3_BDINFO_SIZE; + } + + stblk = HOSTCC_STATBLCK_RING1; + + for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) { + u64 mapping = (u64)tnapi->status_mapping; + tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32); + tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff); + + /* Clear status block in ram. 
*/ + memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); + + if (tnapi->tx_ring) { + tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, + (TG3_TX_RING_SIZE << + BDINFO_FLAGS_MAXLEN_SHIFT), + NIC_SRAM_TX_BUFFER_DESC); + txrcb += TG3_BDINFO_SIZE; + } + + tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, + ((tp->rx_ret_ring_mask + 1) << + BDINFO_FLAGS_MAXLEN_SHIFT), 0); + + stblk += 8; + rxrcb += TG3_BDINFO_SIZE; + } +} + +static void tg3_setup_rxbd_thresholds(struct tg3 *tp) +{ + u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh; + + if (!tg3_flag(tp, 5750_PLUS) || + tg3_flag(tp, 5780_CLASS) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) + bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700; + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) + bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755; + else + bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906; + + nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post); + host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1); + + val = min(nic_rep_thresh, host_rep_thresh); + tw32(RCVBDI_STD_THRESH, val); + + if (tg3_flag(tp, 57765_PLUS)) + tw32(STD_REPLENISH_LWM, bdcache_maxcnt); + + if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) + return; + + if (!tg3_flag(tp, 5705_PLUS)) + bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700; + else + bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717; + + host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1); + + val = min(bdcache_maxcnt / 2, host_rep_thresh); + tw32(RCVBDI_JUMBO_THRESH, val); + + if (tg3_flag(tp, 57765_PLUS)) + tw32(JMB_REPLENISH_LWM, bdcache_maxcnt); +} + +/* tp->lock is held. */ +static int tg3_reset_hw(struct tg3 *tp, int reset_phy) +{ + u32 val, rdmac_mode; + int i, err, limit; + struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; + + tg3_disable_ints(tp); + + tg3_stop_fw(tp); + + tg3_write_sig_pre_reset(tp, RESET_KIND_INIT); + + if (tg3_flag(tp, INIT_COMPLETE)) + tg3_abort_hw(tp, 1); + + /* Enable MAC control of LPI */ + if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) { + tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, + TG3_CPMU_EEE_LNKIDL_PCIE_NL0 | + TG3_CPMU_EEE_LNKIDL_UART_IDL); + + tw32_f(TG3_CPMU_EEE_CTRL, + TG3_CPMU_EEE_CTRL_EXIT_20_1_US); + + val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET | + TG3_CPMU_EEEMD_LPI_IN_TX | + TG3_CPMU_EEEMD_LPI_IN_RX | + TG3_CPMU_EEEMD_EEE_ENABLE; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) + val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN; + + if (tg3_flag(tp, ENABLE_APE)) + val |= TG3_CPMU_EEEMD_APE_TX_DET_EN; + + tw32_f(TG3_CPMU_EEE_MODE, val); + + tw32_f(TG3_CPMU_EEE_DBTMR1, + TG3_CPMU_DBTMR1_PCIEXIT_2047US | + TG3_CPMU_DBTMR1_LNKIDLE_2047US); + + tw32_f(TG3_CPMU_EEE_DBTMR2, + TG3_CPMU_DBTMR2_APE_TX_2047US | + TG3_CPMU_DBTMR2_TXIDXEQ_2047US); + } + + if (reset_phy) + tg3_phy_reset(tp); + + err = tg3_chip_reset(tp); + if (err) + return err; + + tg3_write_sig_legacy(tp, RESET_KIND_INIT); + + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) { + val = tr32(TG3_CPMU_CTRL); + val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE); + tw32(TG3_CPMU_CTRL, val); + + val = tr32(TG3_CPMU_LSPD_10MB_CLK); + val &= ~CPMU_LSPD_10MB_MACCLK_MASK; + val |= CPMU_LSPD_10MB_MACCLK_6_25; + tw32(TG3_CPMU_LSPD_10MB_CLK, val); + + val = tr32(TG3_CPMU_LNK_AWARE_PWRMD); + val &= ~CPMU_LNK_AWARE_MACCLK_MASK; + val |= CPMU_LNK_AWARE_MACCLK_6_25; + tw32(TG3_CPMU_LNK_AWARE_PWRMD, val); + + val = tr32(TG3_CPMU_HST_ACC); + val &= 
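+	/* (Every update in this CHIPREV_5784_AX block is the same
+	 * read-modify-write shape; in outline:
+	 *
+	 *	val = tr32(REG);
+	 *	val = (val & ~FIELD_MASK) | FIELD_VALUE;
+	 *	tw32(REG, val);
+	 *
+	 * clearing the whole field first so stale bits from the old
+	 * clock setting cannot survive the write-back.) */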
~CPMU_HST_ACC_MACCLK_MASK; + val |= CPMU_HST_ACC_MACCLK_6_25; + tw32(TG3_CPMU_HST_ACC, val); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { + val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK; + val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN | + PCIE_PWR_MGMT_L1_THRESH_4MS; + tw32(PCIE_PWR_MGMT_THRESH, val); + + val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK; + tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS); + + tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR); + + val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; + tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); + } + + if (tg3_flag(tp, L1PLLPD_EN)) { + u32 grc_mode = tr32(GRC_MODE); + + /* Access the lower 1K of PL PCIE block registers. */ + val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; + tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); + + val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1); + tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1, + val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN); + + tw32(GRC_MODE, grc_mode); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { + if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) { + u32 grc_mode = tr32(GRC_MODE); + + /* Access the lower 1K of PL PCIE block registers. */ + val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; + tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); + + val = tr32(TG3_PCIE_TLDLPL_PORT + + TG3_PCIE_PL_LO_PHYCTL5); + tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5, + val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ); + + tw32(GRC_MODE, grc_mode); + } + + if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) { + u32 grc_mode = tr32(GRC_MODE); + + /* Access the lower 1K of DL PCIE block registers. */ + val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; + tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL); + + val = tr32(TG3_PCIE_TLDLPL_PORT + + TG3_PCIE_DL_LO_FTSMAX); + val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK; + tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX, + val | TG3_PCIE_DL_LO_FTSMAX_VAL); + + tw32(GRC_MODE, grc_mode); + } + + val = tr32(TG3_CPMU_LSPD_10MB_CLK); + val &= ~CPMU_LSPD_10MB_MACCLK_MASK; + val |= CPMU_LSPD_10MB_MACCLK_6_25; + tw32(TG3_CPMU_LSPD_10MB_CLK, val); + } + + /* This works around an issue with Athlon chipsets on + * B3 tigon3 silicon. This bit has no effect on any + * other revision. But do not set this on PCI Express + * chips and don't even touch the clocks if the CPMU is present. + */ + if (!tg3_flag(tp, CPMU_PRESENT)) { + if (!tg3_flag(tp, PCI_EXPRESS)) + tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT; + tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); + } + + if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && + tg3_flag(tp, PCIX_MODE)) { + val = tr32(TG3PCI_PCISTATE); + val |= PCISTATE_RETRY_SAME_DMA; + tw32(TG3PCI_PCISTATE, val); + } + + if (tg3_flag(tp, ENABLE_APE)) { + /* Allow reads and writes to the + * APE register and memory space. + */ + val = tr32(TG3PCI_PCISTATE); + val |= PCISTATE_ALLOW_APE_CTLSPC_WR | + PCISTATE_ALLOW_APE_SHMEM_WR | + PCISTATE_ALLOW_APE_PSPACE_WR; + tw32(TG3PCI_PCISTATE, val); + } + + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) { + /* Enable some hw fixes. */ + val = tr32(TG3PCI_MSI_DATA); + val |= (1 << 26) | (1 << 28) | (1 << 29); + tw32(TG3PCI_MSI_DATA, val); + } + + /* Descriptor ring init may make accesses to the + * NIC SRAM area to setup the TX descriptors, so we + * can only do this after the hardware has been + * successfully reset. 
+ */ + err = tg3_init_rings(tp); + if (err) + return err; + + if (tg3_flag(tp, 57765_PLUS)) { + val = tr32(TG3PCI_DMA_RW_CTRL) & + ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT; + if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) + val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK; + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) + val |= DMA_RWCTRL_TAGGED_STAT_WA; + tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl); + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) { + /* This value is determined during the probe time DMA + * engine test, tg3_test_dma. + */ + tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); + } + + tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS | + GRC_MODE_4X_NIC_SEND_RINGS | + GRC_MODE_NO_TX_PHDR_CSUM | + GRC_MODE_NO_RX_PHDR_CSUM); + tp->grc_mode |= GRC_MODE_HOST_SENDBDS; + + /* Pseudo-header checksum is done by hardware logic and not + * the offload processers, so make the chip do the pseudo- + * header checksums on receive. For transmit it is more + * convenient to do the pseudo-header checksum in software + * as Linux does that on transmit for us in all cases. + */ + tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM; + + tw32(GRC_MODE, + tp->grc_mode | + (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP)); + + /* Setup the timer prescalar register. Clock is always 66Mhz. */ + val = tr32(GRC_MISC_CFG); + val &= ~0xff; + val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT); + tw32(GRC_MISC_CFG, val); + + /* Initialize MBUF/DESC pool. */ + if (tg3_flag(tp, 5750_PLUS)) { + /* Do nothing. */ + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) { + tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) + tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64); + else + tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96); + tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE); + tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE); + } else if (tg3_flag(tp, TSO_CAPABLE)) { + int fw_len; + + fw_len = tp->fw_len; + fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1); + tw32(BUFMGR_MB_POOL_ADDR, + NIC_SRAM_MBUF_POOL_BASE5705 + fw_len); + tw32(BUFMGR_MB_POOL_SIZE, + NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00); + } + + if (tp->dev->mtu <= ETH_DATA_LEN) { + tw32(BUFMGR_MB_RDMA_LOW_WATER, + tp->bufmgr_config.mbuf_read_dma_low_water); + tw32(BUFMGR_MB_MACRX_LOW_WATER, + tp->bufmgr_config.mbuf_mac_rx_low_water); + tw32(BUFMGR_MB_HIGH_WATER, + tp->bufmgr_config.mbuf_high_water); + } else { + tw32(BUFMGR_MB_RDMA_LOW_WATER, + tp->bufmgr_config.mbuf_read_dma_low_water_jumbo); + tw32(BUFMGR_MB_MACRX_LOW_WATER, + tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo); + tw32(BUFMGR_MB_HIGH_WATER, + tp->bufmgr_config.mbuf_high_water_jumbo); + } + tw32(BUFMGR_DMA_LOW_WATER, + tp->bufmgr_config.dma_low_water); + tw32(BUFMGR_DMA_HIGH_WATER, + tp->bufmgr_config.dma_high_water); + + val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + val |= BUFMGR_MODE_NO_TX_UNDERRUN; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) + val |= BUFMGR_MODE_MBLOW_ATTN_ENAB; + tw32(BUFMGR_MODE, val); + for (i = 0; i < 2000; i++) { + if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE) + break; + udelay(10); + } + if (i >= 2000) { + netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__); + return -ENODEV; + } + + if 
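+	/* (The BUFMGR poll just above allows 2000 passes of udelay(10),
+	 * about 20ms, for the enable bit to latch before failing with
+	 * -ENODEV; the same bounded-poll shape as tg3_stop_block(),
+	 * only waiting for the bit to appear rather than clear.) */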
(tp->pci_chip_rev_id == CHIPREV_ID_5906_A1) + tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2); + + tg3_setup_rxbd_thresholds(tp); + + /* Initialize TG3_BDINFO's at: + * RCVDBDI_STD_BD: standard eth size rx ring + * RCVDBDI_JUMBO_BD: jumbo frame rx ring + * RCVDBDI_MINI_BD: small frame rx ring (??? does not work) + * + * like so: + * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring + * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) | + * ring attribute flags + * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM + * + * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries. + * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries. + * + * The size of each ring is fixed in the firmware, but the location is + * configurable. + */ + tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, + ((u64) tpr->rx_std_mapping >> 32)); + tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, + ((u64) tpr->rx_std_mapping & 0xffffffff)); + if (!tg3_flag(tp, 5717_PLUS)) + tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, + NIC_SRAM_RX_BUFFER_DESC); + + /* Disable the mini ring */ + if (!tg3_flag(tp, 5705_PLUS)) + tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS, + BDINFO_FLAGS_DISABLED); + + /* Program the jumbo buffer descriptor ring control + * blocks on those devices that have them. + */ + if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 || + (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) { + + if (tg3_flag(tp, JUMBO_RING_ENABLE)) { + tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, + ((u64) tpr->rx_jmb_mapping >> 32)); + tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, + ((u64) tpr->rx_jmb_mapping & 0xffffffff)); + val = TG3_RX_JMB_RING_SIZE(tp) << + BDINFO_FLAGS_MAXLEN_SHIFT; + tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, + val | BDINFO_FLAGS_USE_EXT_RECV); + if (!tg3_flag(tp, USE_JUMBO_BDFLAG) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, + NIC_SRAM_RX_JUMBO_BUFFER_DESC); + } else { + tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, + BDINFO_FLAGS_DISABLED); + } + + if (tg3_flag(tp, 57765_PLUS)) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + val = TG3_RX_STD_MAX_SIZE_5700; + else + val = TG3_RX_STD_MAX_SIZE_5717; + val <<= BDINFO_FLAGS_MAXLEN_SHIFT; + val |= (TG3_RX_STD_DMA_SZ << 2); + } else + val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT; + } else + val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT; + + tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); + + tpr->rx_std_prod_idx = tp->rx_pending; + tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx); + + tpr->rx_jmb_prod_idx = + tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0; + tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); + + tg3_rings_reset(tp); + + /* Initialize MAC address and backoff seed. */ + __tg3_set_mac_addr(tp, 0); + + /* MTU + ethernet header + FCS + optional VLAN tag */ + tw32(MAC_RX_MTU_SIZE, + tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); + + /* The slot time is changed by tg3_setup_phy if we + * run at gigabit with half duplex. + */ + val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | + (6 << TX_LENGTHS_IPG_SHIFT) | + (32 << TX_LENGTHS_SLOT_TIME_SHIFT); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + val |= tr32(MAC_TX_LENGTHS) & + (TX_LENGTHS_JMB_FRM_LEN_MSK | + TX_LENGTHS_CNT_DWN_VAL_MSK); + + tw32(MAC_TX_LENGTHS, val); + + /* Receive rules. 
*/ + tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS); + tw32(RCVLPC_CONFIG, 0x0181); + + /* Calculate RDMAC_MODE setting early, we need it to determine + * the RCVLPC_STATE_ENABLE mask. + */ + rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB | + RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB | + RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB | + RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | + RDMAC_MODE_LNGREAD_ENAB); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) + rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) + rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB | + RDMAC_MODE_MBUF_RBD_CRPT_ENAB | + RDMAC_MODE_MBUF_SBD_CRPT_ENAB; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && + tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { + if (tg3_flag(tp, TSO_CAPABLE) && + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128; + } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && + !tg3_flag(tp, IS_5788)) { + rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; + } + } + + if (tg3_flag(tp, PCI_EXPRESS)) + rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; + + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) + rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; + + if (tg3_flag(tp, 57765_PLUS) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) + rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || + tg3_flag(tp, 57765_PLUS)) { + val = tr32(TG3_RDMA_RSRVCTRL_REG); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK | + TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK | + TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK); + val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B | + TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K | + TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K; + } + tw32(TG3_RDMA_RSRVCTRL_REG, + val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL); + tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val | + TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K | + TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K); + } + + /* Receive/send statistics. */ + if (tg3_flag(tp, 5750_PLUS)) { + val = tr32(RCVLPC_STATS_ENABLE); + val &= ~RCVLPC_STATSENAB_DACK_FIX; + tw32(RCVLPC_STATS_ENABLE, val); + } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) && + tg3_flag(tp, TSO_CAPABLE)) { + val = tr32(RCVLPC_STATS_ENABLE); + val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX; + tw32(RCVLPC_STATS_ENABLE, val); + } else { + tw32(RCVLPC_STATS_ENABLE, 0xffffff); + } + tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE); + tw32(SNDDATAI_STATSENAB, 0xffffff); + tw32(SNDDATAI_STATSCTRL, + (SNDDATAI_SCTRL_ENABLE | + SNDDATAI_SCTRL_FASTUPD)); + + /* Setup host coalescing engine. 
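+ *
+ * Broadly, the coalescing engine batches interrupts: a vector fires
+ * once rx/tx_coalesce_usecs have elapsed or rx/tx_max_coalesced_frames
+ * descriptors are pending, whichever happens first.  The order below
+ * matters: disable with tw32(HOSTCC_MODE, 0), poll until the enable
+ * bit actually drops, program the ethtool_coalesce values through
+ * __tg3_set_coalesce(), then re-enable.  Per-vector copies of the rx
+ * registers sit at a 0x18-byte stride, e.g.
+ *
+ *	reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;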
*/ + tw32(HOSTCC_MODE, 0); + for (i = 0; i < 2000; i++) { + if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE)) + break; + udelay(10); + } + + __tg3_set_coalesce(tp, &tp->coal); + + if (!tg3_flag(tp, 5705_PLUS)) { + /* Status/statistics block address. See tg3_timer, + * the tg3_periodic_fetch_stats call there, and + * tg3_get_stats to see how this works for 5705/5750 chips. + */ + tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, + ((u64) tp->stats_mapping >> 32)); + tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, + ((u64) tp->stats_mapping & 0xffffffff)); + tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK); + + tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK); + + /* Clear statistics and status block memory areas */ + for (i = NIC_SRAM_STATS_BLK; + i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE; + i += sizeof(u32)) { + tg3_write_mem(tp, i, 0); + udelay(40); + } + } + + tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode); + + tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE); + tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE); + if (!tg3_flag(tp, 5705_PLUS)) + tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE); + + if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + /* reset to prevent losing 1st rx packet intermittently */ + tw32_f(MAC_RX_MODE, RX_MODE_RESET); + udelay(10); + } + + tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | + MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | + MAC_MODE_FHDE_ENABLE; + if (tg3_flag(tp, ENABLE_APE)) + tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; + if (!tg3_flag(tp, 5705_PLUS) && + !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) + tp->mac_mode |= MAC_MODE_LINK_POLARITY; + tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); + udelay(40); + + /* tp->grc_local_ctrl is partially set up during tg3_get_invariants(). + * If TG3_FLAG_IS_NIC is zero, we should read the + * register to preserve the GPIO settings for LOMs. The GPIOs, + * whether used as inputs or outputs, are set by boot code after + * reset. 
+ */ + if (!tg3_flag(tp, IS_NIC)) { + u32 gpio_mask; + + gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 | + GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) + gpio_mask |= GRC_LCLCTRL_GPIO_OE3 | + GRC_LCLCTRL_GPIO_OUTPUT3; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) + gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL; + + tp->grc_local_ctrl &= ~gpio_mask; + tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask; + + /* GPIO1 must be driven high for eeprom write protect */ + if (tg3_flag(tp, EEPROM_WRITE_PROT)) + tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OUTPUT1); + } + tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); + udelay(100); + + if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) { + val = tr32(MSGINT_MODE); + val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE; + if (!tg3_flag(tp, 1SHOT_MSI)) + val |= MSGINT_MODE_ONE_SHOT_DISABLE; + tw32(MSGINT_MODE, val); + } + + if (!tg3_flag(tp, 5705_PLUS)) { + tw32_f(DMAC_MODE, DMAC_MODE_ENABLE); + udelay(40); + } + + val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB | + WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB | + WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB | + WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB | + WDMAC_MODE_LNGREAD_ENAB); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && + tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { + if (tg3_flag(tp, TSO_CAPABLE) && + (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 || + tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) { + /* nothing */ + } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && + !tg3_flag(tp, IS_5788)) { + val |= WDMAC_MODE_RX_ACCEL; + } + } + + /* Enable host coalescing bug fix */ + if (tg3_flag(tp, 5755_PLUS)) + val |= WDMAC_MODE_STATUS_TAG_FIX; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) + val |= WDMAC_MODE_BURST_ALL_DATA; + + tw32_f(WDMAC_MODE, val); + udelay(40); + + if (tg3_flag(tp, PCIX_MODE)) { + u16 pcix_cmd; + + pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, + &pcix_cmd); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) { + pcix_cmd &= ~PCI_X_CMD_MAX_READ; + pcix_cmd |= PCI_X_CMD_READ_2K; + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { + pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ); + pcix_cmd |= PCI_X_CMD_READ_2K; + } + pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, + pcix_cmd); + } + + tw32_f(RDMAC_MODE, rdmac_mode); + udelay(40); + + tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE); + if (!tg3_flag(tp, 5705_PLUS)) + tw32(MBFREE_MODE, MBFREE_MODE_ENABLE); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + tw32(SNDDATAC_MODE, + SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY); + else + tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE); + + tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE); + tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); + val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ; + if (tg3_flag(tp, LRG_PROD_RING_CAP)) + val |= RCVDBDI_MODE_LRG_RING_SZ; + tw32(RCVDBDI_MODE, val); + tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE); + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) + tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); + val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE; + if (tg3_flag(tp, ENABLE_TSS)) + val |= SNDBDI_MODE_MULTI_TXQ_EN; + tw32(SNDBDI_MODE, val); + tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | 
SNDBDS_MODE_ATTN_ENABLE); + + if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) { + err = tg3_load_5701_a0_firmware_fix(tp); + if (err) + return err; + } + + if (tg3_flag(tp, TSO_CAPABLE)) { + err = tg3_load_tso_firmware(tp); + if (err) + return err; + } + + tp->tx_mode = TX_MODE_ENABLE; + + if (tg3_flag(tp, 5755_PLUS) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE; + tp->tx_mode &= ~val; + tp->tx_mode |= tr32(MAC_TX_MODE) & val; + } + + tw32_f(MAC_TX_MODE, tp->tx_mode); + udelay(100); + + if (tg3_flag(tp, ENABLE_RSS)) { + int i = 0; + u32 reg = MAC_RSS_INDIR_TBL_0; + + if (tp->irq_cnt == 2) { + for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) { + tw32(reg, 0x0); + reg += 4; + } + } else { + u32 val; + + while (i < TG3_RSS_INDIR_TBL_SIZE) { + val = i % (tp->irq_cnt - 1); + i++; + for (; i % 8; i++) { + val <<= 4; + val |= (i % (tp->irq_cnt - 1)); + } + tw32(reg, val); + reg += 4; + } + } + + /* Setup the "secret" hash key. */ + tw32(MAC_RSS_HASH_KEY_0, 0x5f865437); + tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc); + tw32(MAC_RSS_HASH_KEY_2, 0x50103a45); + tw32(MAC_RSS_HASH_KEY_3, 0x36621985); + tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8); + tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e); + tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556); + tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe); + tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7); + tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481); + } + + tp->rx_mode = RX_MODE_ENABLE; + if (tg3_flag(tp, 5755_PLUS)) + tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; + + if (tg3_flag(tp, ENABLE_RSS)) + tp->rx_mode |= RX_MODE_RSS_ENABLE | + RX_MODE_RSS_ITBL_HASH_BITS_7 | + RX_MODE_RSS_IPV6_HASH_EN | + RX_MODE_RSS_TCP_IPV6_HASH_EN | + RX_MODE_RSS_IPV4_HASH_EN | + RX_MODE_RSS_TCP_IPV4_HASH_EN; + + tw32_f(MAC_RX_MODE, tp->rx_mode); + udelay(10); + + tw32(MAC_LED_CTRL, tp->led_ctrl); + + tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { + tw32_f(MAC_RX_MODE, RX_MODE_RESET); + udelay(10); + } + tw32_f(MAC_RX_MODE, tp->rx_mode); + udelay(10); + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { + if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) && + !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) { + /* Set drive transmission level to 1.2V */ + /* only if the signal pre-emphasis bit is not set */ + val = tr32(MAC_SERDES_CFG); + val &= 0xfffff000; + val |= 0x880; + tw32(MAC_SERDES_CFG, val); + } + if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) + tw32(MAC_SERDES_CFG, 0x616000); + } + + /* Prevent chip from dropping frames when flow control + * is enabled. 
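+ *
+ * (On the RSS indirection table just above: each 32-bit register packs
+ * eight 4-bit ring indices, entry i mapping to ring i % (irq_cnt - 1).
+ * With irq_cnt == 5, i.e. four rx rings, every register holds the
+ * repeating pattern 0x01230123.)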
+ */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + val = 1; + else + val = 2; + tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 && + (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { + /* Use hardware link auto-negotiation */ + tg3_flag_set(tp, HW_AUTONEG); + } + + if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { + u32 tmp; + + tmp = tr32(SERDES_RX_CTRL); + tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT); + tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT; + tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT; + tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); + } + + if (!tg3_flag(tp, USE_PHYLIB)) { + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { + tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; + tp->link_config.speed = tp->link_config.orig_speed; + tp->link_config.duplex = tp->link_config.orig_duplex; + tp->link_config.autoneg = tp->link_config.orig_autoneg; + } + + err = tg3_setup_phy(tp, 0); + if (err) + return err; + + if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && + !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { + u32 tmp; + + /* Clear CRC stats. */ + if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) { + tg3_writephy(tp, MII_TG3_TEST1, + tmp | MII_TG3_TEST1_CRC_EN); + tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp); + } + } + } + + __tg3_set_rx_mode(tp->dev); + + /* Initialize receive rules. */ + tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK); + tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK); + tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK); + tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK); + + if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) + limit = 8; + else + limit = 16; + if (tg3_flag(tp, ENABLE_ASF)) + limit -= 4; + switch (limit) { + case 16: + tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0); + case 15: + tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0); + case 14: + tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0); + case 13: + tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0); + case 12: + tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0); + case 11: + tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0); + case 10: + tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0); + case 9: + tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0); + case 8: + tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0); + case 7: + tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0); + case 6: + tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0); + case 5: + tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0); + case 4: + /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */ + case 3: + /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */ + case 2: + case 1: + + default: + break; + } + + if (tg3_flag(tp, ENABLE_APE)) + /* Write our heartbeat update interval to APE. */ + tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, + APE_HOST_HEARTBEAT_INT_DISABLE); + + tg3_write_sig_post_reset(tp, RESET_KIND_INIT); + + return 0; +} + +/* Called at device open time to get the chip ready for + * packet processing. Invoked with tp->lock held. 
+ */
+static int tg3_init_hw(struct tg3 *tp, int reset_phy)
+{
+	tg3_switch_clocks(tp);
+
+	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
+
+	return tg3_reset_hw(tp, reset_phy);
+}
+
+#define TG3_STAT_ADD32(PSTAT, REG) \
+do {	u32 __val = tr32(REG); \
+	(PSTAT)->low += __val; \
+	if ((PSTAT)->low < __val) \
+		(PSTAT)->high += 1; \
+} while (0)
+
+static void tg3_periodic_fetch_stats(struct tg3 *tp)
+{
+	struct tg3_hw_stats *sp = tp->hw_stats;
+
+	if (!netif_carrier_ok(tp->dev))
+		return;
+
+	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
+	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
+	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
+	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
+	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
+	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
+	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
+	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
+	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
+	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
+	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
+	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
+	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
+
+	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
+	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
+	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
+	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
+	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
+	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
+	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
+	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
+	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
+	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
+	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
+	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
+	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
+	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
+
+	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
+	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
+		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
+	} else {
+		u32 val = tr32(HOSTCC_FLOW_ATTN);
+		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ?
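+		/* (On TG3_STAT_ADD32 above: the MAC counters are only
+		 * 32 bits wide, so each reading is folded into a 64-bit
+		 * (high, low) pair.  Overflow shows up as unsigned
+		 * wraparound: if low + __val wrapped, the new low is
+		 * smaller than __val.  E.g. low = 0xffffff00 plus
+		 * __val = 0x200 gives low = 0x100, and 0x100 < 0x200
+		 * bumps high by one.) */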
1 : 0; + if (val) { + tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM); + sp->rx_discards.low += val; + if (sp->rx_discards.low < val) + sp->rx_discards.high += 1; + } + sp->mbuf_lwm_thresh_hit = sp->rx_discards; + } + TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT); +} + +static void tg3_chk_missed_msi(struct tg3 *tp) +{ + u32 i; + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + if (tg3_has_work(tnapi)) { + if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr && + tnapi->last_tx_cons == tnapi->tx_cons) { + if (tnapi->chk_msi_cnt < 1) { + tnapi->chk_msi_cnt++; + return; + } + tg3_msi(0, tnapi); + } + } + tnapi->chk_msi_cnt = 0; + tnapi->last_rx_cons = tnapi->rx_rcb_ptr; + tnapi->last_tx_cons = tnapi->tx_cons; + } +} + +static void tg3_timer(unsigned long __opaque) +{ + struct tg3 *tp = (struct tg3 *) __opaque; + + if (tp->irq_sync) + goto restart_timer; + + spin_lock(&tp->lock); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + tg3_chk_missed_msi(tp); + + if (!tg3_flag(tp, TAGGED_STATUS)) { + /* All of this garbage is because when using non-tagged + * IRQ status the mailbox/status_block protocol the chip + * uses with the cpu is race prone. + */ + if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) { + tw32(GRC_LOCAL_CTRL, + tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); + } else { + tw32(HOSTCC_MODE, tp->coalesce_mode | + HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW); + } + + if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { + tg3_flag_set(tp, RESTART_TIMER); + spin_unlock(&tp->lock); + schedule_work(&tp->reset_task); + return; + } + } + + /* This part only runs once per second. */ + if (!--tp->timer_counter) { + if (tg3_flag(tp, 5705_PLUS)) + tg3_periodic_fetch_stats(tp); + + if (tp->setlpicnt && !--tp->setlpicnt) + tg3_phy_eee_enable(tp); + + if (tg3_flag(tp, USE_LINKCHG_REG)) { + u32 mac_stat; + int phy_event; + + mac_stat = tr32(MAC_STATUS); + + phy_event = 0; + if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) { + if (mac_stat & MAC_STATUS_MI_INTERRUPT) + phy_event = 1; + } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED) + phy_event = 1; + + if (phy_event) + tg3_setup_phy(tp, 0); + } else if (tg3_flag(tp, POLL_SERDES)) { + u32 mac_stat = tr32(MAC_STATUS); + int need_setup = 0; + + if (netif_carrier_ok(tp->dev) && + (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) { + need_setup = 1; + } + if (!netif_carrier_ok(tp->dev) && + (mac_stat & (MAC_STATUS_PCS_SYNCED | + MAC_STATUS_SIGNAL_DET))) { + need_setup = 1; + } + if (need_setup) { + if (!tp->serdes_counter) { + tw32_f(MAC_MODE, + (tp->mac_mode & + ~MAC_MODE_PORT_MODE_MASK)); + udelay(40); + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); + } + tg3_setup_phy(tp, 0); + } + } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && + tg3_flag(tp, 5780_CLASS)) { + tg3_serdes_parallel_detect(tp); + } + + tp->timer_counter = tp->timer_multiplier; + } + + /* Heartbeat is only sent once every 2 seconds. + * + * The heartbeat is to tell the ASF firmware that the host + * driver is still alive. In the event that the OS crashes, + * ASF needs to reset the hardware to free up the FIFO space + * that may be filled with rx packets destined for the host. + * If the FIFO is full, ASF will no longer function properly. + * + * Unintended resets have been reported on real time kernels + * where the timer doesn't run on time. Netpoll will also have + * same problem. 
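+ *
+ * (Both the 1s stats work and this 2s heartbeat come from the same
+ * countdown idiom on the per-tick timer: decrement a counter, do the
+ * slow work only when it reaches zero, then reload from its
+ * multiplier, as in
+ *
+ *	if (!--tp->asf_counter) {
+ *		...
+ *		tp->asf_counter = tp->asf_multiplier;
+ *	}
+ *
+ * so neither path runs on every tick.)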
+ * + * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware + * to check the ring condition when the heartbeat is expiring + * before doing the reset. This will prevent most unintended + * resets. + */ + if (!--tp->asf_counter) { + if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) { + tg3_wait_for_event_ack(tp); + + tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, + FWCMD_NICDRV_ALIVE3); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, + TG3_FW_UPDATE_TIMEOUT_SEC); + + tg3_generate_fw_event(tp); + } + tp->asf_counter = tp->asf_multiplier; + } + + spin_unlock(&tp->lock); + +restart_timer: + tp->timer.expires = jiffies + tp->timer_offset; + add_timer(&tp->timer); +} + +static int tg3_request_irq(struct tg3 *tp, int irq_num) +{ + irq_handler_t fn; + unsigned long flags; + char *name; + struct tg3_napi *tnapi = &tp->napi[irq_num]; + + if (tp->irq_cnt == 1) + name = tp->dev->name; + else { + name = &tnapi->irq_lbl[0]; + snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num); + name[IFNAMSIZ-1] = 0; + } + + if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { + fn = tg3_msi; + if (tg3_flag(tp, 1SHOT_MSI)) + fn = tg3_msi_1shot; + flags = 0; + } else { + fn = tg3_interrupt; + if (tg3_flag(tp, TAGGED_STATUS)) + fn = tg3_interrupt_tagged; + flags = IRQF_SHARED; + } + + return request_irq(tnapi->irq_vec, fn, flags, name, tnapi); +} + +static int tg3_test_interrupt(struct tg3 *tp) +{ + struct tg3_napi *tnapi = &tp->napi[0]; + struct net_device *dev = tp->dev; + int err, i, intr_ok = 0; + u32 val; + + if (!netif_running(dev)) + return -ENODEV; + + tg3_disable_ints(tp); + + free_irq(tnapi->irq_vec, tnapi); + + /* + * Turn off MSI one shot mode. Otherwise this test has no + * observable way to know whether the interrupt was delivered. + */ + if (tg3_flag(tp, 57765_PLUS)) { + val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; + tw32(MSGINT_MODE, val); + } + + err = request_irq(tnapi->irq_vec, tg3_test_isr, + IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi); + if (err) + return err; + + tnapi->hw_status->status &= ~SD_STATUS_UPDATED; + tg3_enable_ints(tp); + + tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | + tnapi->coal_now); + + for (i = 0; i < 5; i++) { + u32 int_mbox, misc_host_ctrl; + + int_mbox = tr32_mailbox(tnapi->int_mbox); + misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); + + if ((int_mbox != 0) || + (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) { + intr_ok = 1; + break; + } + + if (tg3_flag(tp, 57765_PLUS) && + tnapi->hw_status->status_tag != tnapi->last_tag) + tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); + + msleep(10); + } + + tg3_disable_ints(tp); + + free_irq(tnapi->irq_vec, tnapi); + + err = tg3_request_irq(tp, 0); + + if (err) + return err; + + if (intr_ok) { + /* Reenable MSI one shot mode. */ + if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) { + val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE; + tw32(MSGINT_MODE, val); + } + return 0; + } + + return -EIO; +} + +/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is + * successfully restored + */ +static int tg3_test_msi(struct tg3 *tp) +{ + int err; + u16 pci_cmd; + + if (!tg3_flag(tp, USING_MSI)) + return 0; + + /* Turn off SERR reporting in case MSI terminates with Master + * Abort. 
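+ *
+ * The recovery path is: mask SERR in PCI_COMMAND, run
+ * tg3_test_interrupt() (which polls the interrupt mailbox for up to
+ * five 10ms sleeps), restore PCI_COMMAND, and only on -EIO tear MSI
+ * down and fall back to INTx, followed by a full chip reset because
+ * an MSI cycle that ended in Master Abort can leave the chip wedged.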
+ */ + pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); + pci_write_config_word(tp->pdev, PCI_COMMAND, + pci_cmd & ~PCI_COMMAND_SERR); + + err = tg3_test_interrupt(tp); + + pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); + + if (!err) + return 0; + + /* other failures */ + if (err != -EIO) + return err; + + /* MSI test failed, go back to INTx mode */ + netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching " + "to INTx mode. Please report this failure to the PCI " + "maintainer and include system chipset information\n"); + + free_irq(tp->napi[0].irq_vec, &tp->napi[0]); + + pci_disable_msi(tp->pdev); + + tg3_flag_clear(tp, USING_MSI); + tp->napi[0].irq_vec = tp->pdev->irq; + + err = tg3_request_irq(tp, 0); + if (err) + return err; + + /* Need to reset the chip because the MSI cycle may have terminated + * with Master Abort. + */ + tg3_full_lock(tp, 1); + + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + err = tg3_init_hw(tp, 1); + + tg3_full_unlock(tp); + + if (err) + free_irq(tp->napi[0].irq_vec, &tp->napi[0]); + + return err; +} + +static int tg3_request_firmware(struct tg3 *tp) +{ + const __be32 *fw_data; + + if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) { + netdev_err(tp->dev, "Failed to load firmware \"%s\"\n", + tp->fw_needed); + return -ENOENT; + } + + fw_data = (void *)tp->fw->data; + + /* Firmware blob starts with version numbers, followed by + * start address and _full_ length including BSS sections + * (which must be longer than the actual data, of course + */ + + tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */ + if (tp->fw_len < (tp->fw->size - 12)) { + netdev_err(tp->dev, "bogus length %d in \"%s\"\n", + tp->fw_len, tp->fw_needed); + release_firmware(tp->fw); + tp->fw = NULL; + return -EINVAL; + } + + /* We no longer need firmware; we have it. */ + tp->fw_needed = NULL; + return 0; +} + +static bool tg3_enable_msix(struct tg3 *tp) +{ + int i, rc, cpus = num_online_cpus(); + struct msix_entry msix_ent[tp->irq_max]; + + if (cpus == 1) + /* Just fallback to the simpler MSI mode. */ + return false; + + /* + * We want as many rx rings enabled as there are cpus. + * The first MSIX vector only deals with link interrupts, etc, + * so we add one to the number of vectors we are requesting. + */ + tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max); + + for (i = 0; i < tp->irq_max; i++) { + msix_ent[i].entry = i; + msix_ent[i].vector = 0; + } + + rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt); + if (rc < 0) { + return false; + } else if (rc != 0) { + if (pci_enable_msix(tp->pdev, msix_ent, rc)) + return false; + netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n", + tp->irq_cnt, rc); + tp->irq_cnt = rc; + } + + for (i = 0; i < tp->irq_max; i++) + tp->napi[i].irq_vec = msix_ent[i].vector; + + netif_set_real_num_tx_queues(tp->dev, 1); + rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1; + if (netif_set_real_num_rx_queues(tp->dev, rc)) { + pci_disable_msix(tp->pdev); + return false; + } + + if (tp->irq_cnt > 1) { + tg3_flag_set(tp, ENABLE_RSS); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { + tg3_flag_set(tp, ENABLE_TSS); + netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1); + } + } + + return true; +} + +static void tg3_ints_init(struct tg3 *tp) +{ + if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) && + !tg3_flag(tp, TAGGED_STATUS)) { + /* All MSI supporting chips should support tagged + * status. Assert that this is the case. 
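+ * [Editor's note, added annotation: despite the wording above, the
+ * driver does not actually assert here -- it only warns and jumps to
+ * defcfg below, leaving the device on a single legacy INTx vector.]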
+ */ + netdev_warn(tp->dev, + "MSI without TAGGED_STATUS? Not using MSI\n"); + goto defcfg; + } + + if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp)) + tg3_flag_set(tp, USING_MSIX); + else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0) + tg3_flag_set(tp, USING_MSI); + + if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) { + u32 msi_mode = tr32(MSGINT_MODE); + if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) + msi_mode |= MSGINT_MODE_MULTIVEC_EN; + if (!tg3_flag(tp, 1SHOT_MSI)) + msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE; + tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); + } +defcfg: + if (!tg3_flag(tp, USING_MSIX)) { + tp->irq_cnt = 1; + tp->napi[0].irq_vec = tp->pdev->irq; + netif_set_real_num_tx_queues(tp->dev, 1); + netif_set_real_num_rx_queues(tp->dev, 1); + } +} + +static void tg3_ints_fini(struct tg3 *tp) +{ + if (tg3_flag(tp, USING_MSIX)) + pci_disable_msix(tp->pdev); + else if (tg3_flag(tp, USING_MSI)) + pci_disable_msi(tp->pdev); + tg3_flag_clear(tp, USING_MSI); + tg3_flag_clear(tp, USING_MSIX); + tg3_flag_clear(tp, ENABLE_RSS); + tg3_flag_clear(tp, ENABLE_TSS); +} + +static int tg3_open(struct net_device *dev) +{ + struct tg3 *tp = netdev_priv(dev); + int i, err; + + if (tp->fw_needed) { + err = tg3_request_firmware(tp); + if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) { + if (err) + return err; + } else if (err) { + netdev_warn(tp->dev, "TSO capability disabled\n"); + tg3_flag_clear(tp, TSO_CAPABLE); + } else if (!tg3_flag(tp, TSO_CAPABLE)) { + netdev_notice(tp->dev, "TSO capability restored\n"); + tg3_flag_set(tp, TSO_CAPABLE); + } + } + + netif_carrier_off(tp->dev); + + err = tg3_power_up(tp); + if (err) + return err; + + tg3_full_lock(tp, 0); + + tg3_disable_ints(tp); + tg3_flag_clear(tp, INIT_COMPLETE); + + tg3_full_unlock(tp); + + /* + * Setup interrupts first so we know how + * many NAPI resources to allocate + */ + tg3_ints_init(tp); + + /* The placement of this call is tied + * to the setup and use of Host TX descriptors. 
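+ * [Editor's note, added annotation: the ordering matters --
+ * tg3_ints_init() above fixes tp->irq_cnt, and tg3_alloc_consistent()
+ * below uses that count to size the per-vector status blocks and
+ * NAPI contexts.]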
+ */ + err = tg3_alloc_consistent(tp); + if (err) + goto err_out1; + + tg3_napi_init(tp); + + tg3_napi_enable(tp); + + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + err = tg3_request_irq(tp, i); + if (err) { + for (i--; i >= 0; i--) + free_irq(tnapi->irq_vec, tnapi); + break; + } + } + + if (err) + goto err_out2; + + tg3_full_lock(tp, 0); + + err = tg3_init_hw(tp, 1); + if (err) { + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + tg3_free_rings(tp); + } else { + if (tg3_flag(tp, TAGGED_STATUS) && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) + tp->timer_offset = HZ; + else + tp->timer_offset = HZ / 10; + + BUG_ON(tp->timer_offset > HZ); + tp->timer_counter = tp->timer_multiplier = + (HZ / tp->timer_offset); + tp->asf_counter = tp->asf_multiplier = + ((HZ / tp->timer_offset) * 2); + + init_timer(&tp->timer); + tp->timer.expires = jiffies + tp->timer_offset; + tp->timer.data = (unsigned long) tp; + tp->timer.function = tg3_timer; + } + + tg3_full_unlock(tp); + + if (err) + goto err_out3; + + if (tg3_flag(tp, USING_MSI)) { + err = tg3_test_msi(tp); + + if (err) { + tg3_full_lock(tp, 0); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + tg3_free_rings(tp); + tg3_full_unlock(tp); + + goto err_out2; + } + + if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) { + u32 val = tr32(PCIE_TRANSACTION_CFG); + + tw32(PCIE_TRANSACTION_CFG, + val | PCIE_TRANS_CFG_1SHOT_MSI); + } + } + + tg3_phy_start(tp); + + tg3_full_lock(tp, 0); + + add_timer(&tp->timer); + tg3_flag_set(tp, INIT_COMPLETE); + tg3_enable_ints(tp); + + tg3_full_unlock(tp); + + netif_tx_start_all_queues(dev); + + /* + * Reset loopback feature if it was turned on while the device was down + * make sure that it's installed properly now. 
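+ * [Editor's note, added annotation: NETIF_F_LOOPBACK can remain set
+ * in dev->features across an ifdown/ifup cycle, but the MAC register
+ * state behind it is lost in the reset, so tg3_set_loopback() below
+ * re-applies it.]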
+ */ + if (dev->features & NETIF_F_LOOPBACK) + tg3_set_loopback(dev, dev->features); + + return 0; + +err_out3: + for (i = tp->irq_cnt - 1; i >= 0; i--) { + struct tg3_napi *tnapi = &tp->napi[i]; + free_irq(tnapi->irq_vec, tnapi); + } + +err_out2: + tg3_napi_disable(tp); + tg3_napi_fini(tp); + tg3_free_consistent(tp); + +err_out1: + tg3_ints_fini(tp); + tg3_frob_aux_power(tp, false); + pci_set_power_state(tp->pdev, PCI_D3hot); + return err; +} + +static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *, + struct rtnl_link_stats64 *); +static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *); + +static int tg3_close(struct net_device *dev) +{ + int i; + struct tg3 *tp = netdev_priv(dev); + + tg3_napi_disable(tp); + cancel_work_sync(&tp->reset_task); + + netif_tx_stop_all_queues(dev); + + del_timer_sync(&tp->timer); + + tg3_phy_stop(tp); + + tg3_full_lock(tp, 1); + + tg3_disable_ints(tp); + + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + tg3_free_rings(tp); + tg3_flag_clear(tp, INIT_COMPLETE); + + tg3_full_unlock(tp); + + for (i = tp->irq_cnt - 1; i >= 0; i--) { + struct tg3_napi *tnapi = &tp->napi[i]; + free_irq(tnapi->irq_vec, tnapi); + } + + tg3_ints_fini(tp); + + tg3_get_stats64(tp->dev, &tp->net_stats_prev); + + memcpy(&tp->estats_prev, tg3_get_estats(tp), + sizeof(tp->estats_prev)); + + tg3_napi_fini(tp); + + tg3_free_consistent(tp); + + tg3_power_down(tp); + + netif_carrier_off(tp->dev); + + return 0; +} + +static inline u64 get_stat64(tg3_stat64_t *val) +{ + return ((u64)val->high << 32) | ((u64)val->low); +} + +static u64 calc_crc_errors(struct tg3 *tp) +{ + struct tg3_hw_stats *hw_stats = tp->hw_stats; + + if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { + u32 val; + + spin_lock_bh(&tp->lock); + if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) { + tg3_writephy(tp, MII_TG3_TEST1, + val | MII_TG3_TEST1_CRC_EN); + tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val); + } else + val = 0; + spin_unlock_bh(&tp->lock); + + tp->phy_crc_errors += val; + + return tp->phy_crc_errors; + } + + return get_stat64(&hw_stats->rx_fcs_errors); +} + +#define ESTAT_ADD(member) \ + estats->member = old_estats->member + \ + get_stat64(&hw_stats->member) + +static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp) +{ + struct tg3_ethtool_stats *estats = &tp->estats; + struct tg3_ethtool_stats *old_estats = &tp->estats_prev; + struct tg3_hw_stats *hw_stats = tp->hw_stats; + + if (!hw_stats) + return old_estats; + + ESTAT_ADD(rx_octets); + ESTAT_ADD(rx_fragments); + ESTAT_ADD(rx_ucast_packets); + ESTAT_ADD(rx_mcast_packets); + ESTAT_ADD(rx_bcast_packets); + ESTAT_ADD(rx_fcs_errors); + ESTAT_ADD(rx_align_errors); + ESTAT_ADD(rx_xon_pause_rcvd); + ESTAT_ADD(rx_xoff_pause_rcvd); + ESTAT_ADD(rx_mac_ctrl_rcvd); + ESTAT_ADD(rx_xoff_entered); + ESTAT_ADD(rx_frame_too_long_errors); + ESTAT_ADD(rx_jabbers); + ESTAT_ADD(rx_undersize_packets); + ESTAT_ADD(rx_in_length_errors); + ESTAT_ADD(rx_out_length_errors); + ESTAT_ADD(rx_64_or_less_octet_packets); + ESTAT_ADD(rx_65_to_127_octet_packets); + ESTAT_ADD(rx_128_to_255_octet_packets); + ESTAT_ADD(rx_256_to_511_octet_packets); + ESTAT_ADD(rx_512_to_1023_octet_packets); + ESTAT_ADD(rx_1024_to_1522_octet_packets); + ESTAT_ADD(rx_1523_to_2047_octet_packets); + ESTAT_ADD(rx_2048_to_4095_octet_packets); + ESTAT_ADD(rx_4096_to_8191_octet_packets); + ESTAT_ADD(rx_8192_to_9022_octet_packets); + + ESTAT_ADD(tx_octets); + ESTAT_ADD(tx_collisions); + 
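+ /* [Editor's note, added annotation -- not part of the original patch:
+ * each ESTAT_ADD(x) in this function expands to
+ *
+ *	estats->x = old_estats->x + get_stat64(&hw_stats->x);
+ *
+ * hw_stats holds the live hardware counters, which are lost across a
+ * chip reset; estats_prev is the snapshot taken in tg3_close(), so
+ * the sum presents monotonically increasing counters to ethtool
+ * across down/up cycles.]
+ */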
ESTAT_ADD(tx_xon_sent); + ESTAT_ADD(tx_xoff_sent); + ESTAT_ADD(tx_flow_control); + ESTAT_ADD(tx_mac_errors); + ESTAT_ADD(tx_single_collisions); + ESTAT_ADD(tx_mult_collisions); + ESTAT_ADD(tx_deferred); + ESTAT_ADD(tx_excessive_collisions); + ESTAT_ADD(tx_late_collisions); + ESTAT_ADD(tx_collide_2times); + ESTAT_ADD(tx_collide_3times); + ESTAT_ADD(tx_collide_4times); + ESTAT_ADD(tx_collide_5times); + ESTAT_ADD(tx_collide_6times); + ESTAT_ADD(tx_collide_7times); + ESTAT_ADD(tx_collide_8times); + ESTAT_ADD(tx_collide_9times); + ESTAT_ADD(tx_collide_10times); + ESTAT_ADD(tx_collide_11times); + ESTAT_ADD(tx_collide_12times); + ESTAT_ADD(tx_collide_13times); + ESTAT_ADD(tx_collide_14times); + ESTAT_ADD(tx_collide_15times); + ESTAT_ADD(tx_ucast_packets); + ESTAT_ADD(tx_mcast_packets); + ESTAT_ADD(tx_bcast_packets); + ESTAT_ADD(tx_carrier_sense_errors); + ESTAT_ADD(tx_discards); + ESTAT_ADD(tx_errors); + + ESTAT_ADD(dma_writeq_full); + ESTAT_ADD(dma_write_prioq_full); + ESTAT_ADD(rxbds_empty); + ESTAT_ADD(rx_discards); + ESTAT_ADD(rx_errors); + ESTAT_ADD(rx_threshold_hit); + + ESTAT_ADD(dma_readq_full); + ESTAT_ADD(dma_read_prioq_full); + ESTAT_ADD(tx_comp_queue_full); + + ESTAT_ADD(ring_set_send_prod_index); + ESTAT_ADD(ring_status_update); + ESTAT_ADD(nic_irqs); + ESTAT_ADD(nic_avoided_irqs); + ESTAT_ADD(nic_tx_threshold_hit); + + ESTAT_ADD(mbuf_lwm_thresh_hit); + + return estats; +} + +static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct tg3 *tp = netdev_priv(dev); + struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev; + struct tg3_hw_stats *hw_stats = tp->hw_stats; + + if (!hw_stats) + return old_stats; + + stats->rx_packets = old_stats->rx_packets + + get_stat64(&hw_stats->rx_ucast_packets) + + get_stat64(&hw_stats->rx_mcast_packets) + + get_stat64(&hw_stats->rx_bcast_packets); + + stats->tx_packets = old_stats->tx_packets + + get_stat64(&hw_stats->tx_ucast_packets) + + get_stat64(&hw_stats->tx_mcast_packets) + + get_stat64(&hw_stats->tx_bcast_packets); + + stats->rx_bytes = old_stats->rx_bytes + + get_stat64(&hw_stats->rx_octets); + stats->tx_bytes = old_stats->tx_bytes + + get_stat64(&hw_stats->tx_octets); + + stats->rx_errors = old_stats->rx_errors + + get_stat64(&hw_stats->rx_errors); + stats->tx_errors = old_stats->tx_errors + + get_stat64(&hw_stats->tx_errors) + + get_stat64(&hw_stats->tx_mac_errors) + + get_stat64(&hw_stats->tx_carrier_sense_errors) + + get_stat64(&hw_stats->tx_discards); + + stats->multicast = old_stats->multicast + + get_stat64(&hw_stats->rx_mcast_packets); + stats->collisions = old_stats->collisions + + get_stat64(&hw_stats->tx_collisions); + + stats->rx_length_errors = old_stats->rx_length_errors + + get_stat64(&hw_stats->rx_frame_too_long_errors) + + get_stat64(&hw_stats->rx_undersize_packets); + + stats->rx_over_errors = old_stats->rx_over_errors + + get_stat64(&hw_stats->rxbds_empty); + stats->rx_frame_errors = old_stats->rx_frame_errors + + get_stat64(&hw_stats->rx_align_errors); + stats->tx_aborted_errors = old_stats->tx_aborted_errors + + get_stat64(&hw_stats->tx_discards); + stats->tx_carrier_errors = old_stats->tx_carrier_errors + + get_stat64(&hw_stats->tx_carrier_sense_errors); + + stats->rx_crc_errors = old_stats->rx_crc_errors + + calc_crc_errors(tp); + + stats->rx_missed_errors = old_stats->rx_missed_errors + + get_stat64(&hw_stats->rx_discards); + + stats->rx_dropped = tp->rx_dropped; + + return stats; +} + +static inline u32 calc_crc(unsigned char *buf, int len) +{ + u32 reg; + 
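+ /* [Editor's note, added annotation: this is the standard bit-reflected
+ * CRC-32 (polynomial 0xedb88320), the same CRC the Ethernet FCS uses,
+ * computed one bit at a time.  __tg3_set_rx_mode() below folds it
+ * into the 128-bit multicast hash roughly like so:
+ *
+ *	crc = calc_crc(ha->addr, ETH_ALEN);
+ *	bit = ~crc & 0x7f;		hash bit index, 0..127
+ *	regidx = (bit & 0x60) >> 5;	selects MAC_HASH_REG_0..3
+ *	bit &= 0x1f;			bit within that register
+ *	mc_filter[regidx] |= 1 << bit;]
+ */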
u32 tmp; + int j, k; + + reg = 0xffffffff; + + for (j = 0; j < len; j++) { + reg ^= buf[j]; + + for (k = 0; k < 8; k++) { + tmp = reg & 0x01; + + reg >>= 1; + + if (tmp) + reg ^= 0xedb88320; + } + } + + return ~reg; +} + +static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all) +{ + /* accept or reject all multicast frames */ + tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0); + tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0); + tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0); + tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0); +} + +static void __tg3_set_rx_mode(struct net_device *dev) +{ + struct tg3 *tp = netdev_priv(dev); + u32 rx_mode; + + rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC | + RX_MODE_KEEP_VLAN_TAG); + +#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE) + /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG + * flag clear. + */ + if (!tg3_flag(tp, ENABLE_ASF)) + rx_mode |= RX_MODE_KEEP_VLAN_TAG; +#endif + + if (dev->flags & IFF_PROMISC) { + /* Promiscuous mode. */ + rx_mode |= RX_MODE_PROMISC; + } else if (dev->flags & IFF_ALLMULTI) { + /* Accept all multicast. */ + tg3_set_multi(tp, 1); + } else if (netdev_mc_empty(dev)) { + /* Reject all multicast. */ + tg3_set_multi(tp, 0); + } else { + /* Accept one or more multicast(s). */ + struct netdev_hw_addr *ha; + u32 mc_filter[4] = { 0, }; + u32 regidx; + u32 bit; + u32 crc; + + netdev_for_each_mc_addr(ha, dev) { + crc = calc_crc(ha->addr, ETH_ALEN); + bit = ~crc & 0x7f; + regidx = (bit & 0x60) >> 5; + bit &= 0x1f; + mc_filter[regidx] |= (1 << bit); + } + + tw32(MAC_HASH_REG_0, mc_filter[0]); + tw32(MAC_HASH_REG_1, mc_filter[1]); + tw32(MAC_HASH_REG_2, mc_filter[2]); + tw32(MAC_HASH_REG_3, mc_filter[3]); + } + + if (rx_mode != tp->rx_mode) { + tp->rx_mode = rx_mode; + tw32_f(MAC_RX_MODE, rx_mode); + udelay(10); + } +} + +static void tg3_set_rx_mode(struct net_device *dev) +{ + struct tg3 *tp = netdev_priv(dev); + + if (!netif_running(dev)) + return; + + tg3_full_lock(tp, 0); + __tg3_set_rx_mode(dev); + tg3_full_unlock(tp); +} + +static int tg3_get_regs_len(struct net_device *dev) +{ + return TG3_REG_BLK_SIZE; +} + +static void tg3_get_regs(struct net_device *dev, + struct ethtool_regs *regs, void *_p) +{ + struct tg3 *tp = netdev_priv(dev); + + regs->version = 0; + + memset(_p, 0, TG3_REG_BLK_SIZE); + + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) + return; + + tg3_full_lock(tp, 0); + + tg3_dump_legacy_regs(tp, (u32 *)_p); + + tg3_full_unlock(tp); +} + +static int tg3_get_eeprom_len(struct net_device *dev) +{ + struct tg3 *tp = netdev_priv(dev); + + return tp->nvram_size; +} + +static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) +{ + struct tg3 *tp = netdev_priv(dev); + int ret; + u8 *pd; + u32 i, offset, len, b_offset, b_count; + __be32 val; + + if (tg3_flag(tp, NO_NVRAM)) + return -EINVAL; + + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) + return -EAGAIN; + + offset = eeprom->offset; + len = eeprom->len; + eeprom->len = 0; + + eeprom->magic = TG3_EEPROM_MAGIC; + + if (offset & 3) { + /* adjustments to start on required 4 byte boundary */ + b_offset = offset & 3; + b_count = 4 - b_offset; + if (b_count > len) { + /* i.e. 
offset=1 len=2 */ + b_count = len; + } + ret = tg3_nvram_read_be32(tp, offset-b_offset, &val); + if (ret) + return ret; + memcpy(data, ((char *)&val) + b_offset, b_count); + len -= b_count; + offset += b_count; + eeprom->len += b_count; + } + + /* read bytes up to the last 4 byte boundary */ + pd = &data[eeprom->len]; + for (i = 0; i < (len - (len & 3)); i += 4) { + ret = tg3_nvram_read_be32(tp, offset + i, &val); + if (ret) { + eeprom->len += i; + return ret; + } + memcpy(pd + i, &val, 4); + } + eeprom->len += i; + + if (len & 3) { + /* read last bytes not ending on 4 byte boundary */ + pd = &data[eeprom->len]; + b_count = len & 3; + b_offset = offset + len - b_count; + ret = tg3_nvram_read_be32(tp, b_offset, &val); + if (ret) + return ret; + memcpy(pd, &val, b_count); + eeprom->len += b_count; + } + return 0; +} + +static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); + +static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) +{ + struct tg3 *tp = netdev_priv(dev); + int ret; + u32 offset, len, b_offset, odd_len; + u8 *buf; + __be32 start, end; + + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) + return -EAGAIN; + + if (tg3_flag(tp, NO_NVRAM) || + eeprom->magic != TG3_EEPROM_MAGIC) + return -EINVAL; + + offset = eeprom->offset; + len = eeprom->len; + + if ((b_offset = (offset & 3))) { + /* adjustments to start on required 4 byte boundary */ + ret = tg3_nvram_read_be32(tp, offset-b_offset, &start); + if (ret) + return ret; + len += b_offset; + offset &= ~3; + if (len < 4) + len = 4; + } + + odd_len = 0; + if (len & 3) { + /* adjustments to end on required 4 byte boundary */ + odd_len = 1; + len = (len + 3) & ~3; + ret = tg3_nvram_read_be32(tp, offset+len-4, &end); + if (ret) + return ret; + } + + buf = data; + if (b_offset || odd_len) { + buf = kmalloc(len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + if (b_offset) + memcpy(buf, &start, 4); + if (odd_len) + memcpy(buf+len-4, &end, 4); + memcpy(buf + b_offset, data, eeprom->len); + } + + ret = tg3_nvram_write_block(tp, offset, len, buf); + + if (buf != data) + kfree(buf); + + return ret; +} + +static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct tg3 *tp = netdev_priv(dev); + + if (tg3_flag(tp, USE_PHYLIB)) { + struct phy_device *phydev; + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) + return -EAGAIN; + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + return phy_ethtool_gset(phydev, cmd); + } + + cmd->supported = (SUPPORTED_Autoneg); + + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) + cmd->supported |= (SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full); + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { + cmd->supported |= (SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_TP); + cmd->port = PORT_TP; + } else { + cmd->supported |= SUPPORTED_FIBRE; + cmd->port = PORT_FIBRE; + } + + cmd->advertising = tp->link_config.advertising; + if (tg3_flag(tp, PAUSE_AUTONEG)) { + if (tp->link_config.flowctrl & FLOW_CTRL_RX) { + if (tp->link_config.flowctrl & FLOW_CTRL_TX) { + cmd->advertising |= ADVERTISED_Pause; + } else { + cmd->advertising |= ADVERTISED_Pause | + ADVERTISED_Asym_Pause; + } + } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) { + cmd->advertising |= ADVERTISED_Asym_Pause; + } + } + if (netif_running(dev)) { + ethtool_cmd_speed_set(cmd, tp->link_config.active_speed); + cmd->duplex = tp->link_config.active_duplex; + } else { + ethtool_cmd_speed_set(cmd, 
SPEED_INVALID); + cmd->duplex = DUPLEX_INVALID; + } + cmd->phy_address = tp->phy_addr; + cmd->transceiver = XCVR_INTERNAL; + cmd->autoneg = tp->link_config.autoneg; + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + return 0; +} + +static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct tg3 *tp = netdev_priv(dev); + u32 speed = ethtool_cmd_speed(cmd); + + if (tg3_flag(tp, USE_PHYLIB)) { + struct phy_device *phydev; + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) + return -EAGAIN; + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + return phy_ethtool_sset(phydev, cmd); + } + + if (cmd->autoneg != AUTONEG_ENABLE && + cmd->autoneg != AUTONEG_DISABLE) + return -EINVAL; + + if (cmd->autoneg == AUTONEG_DISABLE && + cmd->duplex != DUPLEX_FULL && + cmd->duplex != DUPLEX_HALF) + return -EINVAL; + + if (cmd->autoneg == AUTONEG_ENABLE) { + u32 mask = ADVERTISED_Autoneg | + ADVERTISED_Pause | + ADVERTISED_Asym_Pause; + + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) + mask |= ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full; + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) + mask |= ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_TP; + else + mask |= ADVERTISED_FIBRE; + + if (cmd->advertising & ~mask) + return -EINVAL; + + mask &= (ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full); + + cmd->advertising &= mask; + } else { + if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) { + if (speed != SPEED_1000) + return -EINVAL; + + if (cmd->duplex != DUPLEX_FULL) + return -EINVAL; + } else { + if (speed != SPEED_100 && + speed != SPEED_10) + return -EINVAL; + } + } + + tg3_full_lock(tp, 0); + + tp->link_config.autoneg = cmd->autoneg; + if (cmd->autoneg == AUTONEG_ENABLE) { + tp->link_config.advertising = (cmd->advertising | + ADVERTISED_Autoneg); + tp->link_config.speed = SPEED_INVALID; + tp->link_config.duplex = DUPLEX_INVALID; + } else { + tp->link_config.advertising = 0; + tp->link_config.speed = speed; + tp->link_config.duplex = cmd->duplex; + } + + tp->link_config.orig_speed = tp->link_config.speed; + tp->link_config.orig_duplex = tp->link_config.duplex; + tp->link_config.orig_autoneg = tp->link_config.autoneg; + + if (netif_running(dev)) + tg3_setup_phy(tp, 1); + + tg3_full_unlock(tp); + + return 0; +} + +static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct tg3 *tp = netdev_priv(dev); + + strcpy(info->driver, DRV_MODULE_NAME); + strcpy(info->version, DRV_MODULE_VERSION); + strcpy(info->fw_version, tp->fw_ver); + strcpy(info->bus_info, pci_name(tp->pdev)); +} + +static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct tg3 *tp = netdev_priv(dev); + + if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev)) + wol->supported = WAKE_MAGIC; + else + wol->supported = 0; + wol->wolopts = 0; + if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev)) + wol->wolopts = WAKE_MAGIC; + memset(&wol->sopass, 0, sizeof(wol->sopass)); +} + +static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct tg3 *tp = netdev_priv(dev); + struct device *dp = &tp->pdev->dev; + + if (wol->wolopts & ~WAKE_MAGIC) + return -EINVAL; + if ((wol->wolopts & WAKE_MAGIC) && + !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp))) + return -EINVAL; + + device_set_wakeup_enable(dp, wol->wolopts & 
WAKE_MAGIC); + + spin_lock_bh(&tp->lock); + if (device_may_wakeup(dp)) + tg3_flag_set(tp, WOL_ENABLE); + else + tg3_flag_clear(tp, WOL_ENABLE); + spin_unlock_bh(&tp->lock); + + return 0; +} + +static u32 tg3_get_msglevel(struct net_device *dev) +{ + struct tg3 *tp = netdev_priv(dev); + return tp->msg_enable; +} + +static void tg3_set_msglevel(struct net_device *dev, u32 value) +{ + struct tg3 *tp = netdev_priv(dev); + tp->msg_enable = value; +} + +static int tg3_nway_reset(struct net_device *dev) +{ + struct tg3 *tp = netdev_priv(dev); + int r; + + if (!netif_running(dev)) + return -EAGAIN; + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) + return -EINVAL; + + if (tg3_flag(tp, USE_PHYLIB)) { + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) + return -EAGAIN; + r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); + } else { + u32 bmcr; + + spin_lock_bh(&tp->lock); + r = -EINVAL; + tg3_readphy(tp, MII_BMCR, &bmcr); + if (!tg3_readphy(tp, MII_BMCR, &bmcr) && + ((bmcr & BMCR_ANENABLE) || + (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) { + tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART | + BMCR_ANENABLE); + r = 0; + } + spin_unlock_bh(&tp->lock); + } + + return r; +} + +static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) +{ + struct tg3 *tp = netdev_priv(dev); + + ering->rx_max_pending = tp->rx_std_ring_mask; + if (tg3_flag(tp, JUMBO_RING_ENABLE)) + ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask; + else + ering->rx_jumbo_max_pending = 0; + + ering->tx_max_pending = TG3_TX_RING_SIZE - 1; + + ering->rx_pending = tp->rx_pending; + if (tg3_flag(tp, JUMBO_RING_ENABLE)) + ering->rx_jumbo_pending = tp->rx_jumbo_pending; + else + ering->rx_jumbo_pending = 0; + + ering->tx_pending = tp->napi[0].tx_pending; +} + +static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) +{ + struct tg3 *tp = netdev_priv(dev); + int i, irq_sync = 0, err = 0; + + if ((ering->rx_pending > tp->rx_std_ring_mask) || + (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) || + (ering->tx_pending > TG3_TX_RING_SIZE - 1) || + (ering->tx_pending <= MAX_SKB_FRAGS) || + (tg3_flag(tp, TSO_BUG) && + (ering->tx_pending <= (MAX_SKB_FRAGS * 3)))) + return -EINVAL; + + if (netif_running(dev)) { + tg3_phy_stop(tp); + tg3_netif_stop(tp); + irq_sync = 1; + } + + tg3_full_lock(tp, irq_sync); + + tp->rx_pending = ering->rx_pending; + + if (tg3_flag(tp, MAX_RXPEND_64) && + tp->rx_pending > 63) + tp->rx_pending = 63; + tp->rx_jumbo_pending = ering->rx_jumbo_pending; + + for (i = 0; i < tp->irq_max; i++) + tp->napi[i].tx_pending = ering->tx_pending; + + if (netif_running(dev)) { + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + err = tg3_restart_hw(tp, 1); + if (!err) + tg3_netif_start(tp); + } + + tg3_full_unlock(tp); + + if (irq_sync && !err) + tg3_phy_start(tp); + + return err; +} + +static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) +{ + struct tg3 *tp = netdev_priv(dev); + + epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG); + + if (tp->link_config.active_flowctrl & FLOW_CTRL_RX) + epause->rx_pause = 1; + else + epause->rx_pause = 0; + + if (tp->link_config.active_flowctrl & FLOW_CTRL_TX) + epause->tx_pause = 1; + else + epause->tx_pause = 0; +} + +static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) +{ + struct tg3 *tp = netdev_priv(dev); + int err = 0; + + if (tg3_flag(tp, USE_PHYLIB)) { + u32 newadv; + struct phy_device *phydev; + + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + + if 
(!(phydev->supported & SUPPORTED_Pause) || + (!(phydev->supported & SUPPORTED_Asym_Pause) && + (epause->rx_pause != epause->tx_pause))) + return -EINVAL; + + tp->link_config.flowctrl = 0; + if (epause->rx_pause) { + tp->link_config.flowctrl |= FLOW_CTRL_RX; + + if (epause->tx_pause) { + tp->link_config.flowctrl |= FLOW_CTRL_TX; + newadv = ADVERTISED_Pause; + } else + newadv = ADVERTISED_Pause | + ADVERTISED_Asym_Pause; + } else if (epause->tx_pause) { + tp->link_config.flowctrl |= FLOW_CTRL_TX; + newadv = ADVERTISED_Asym_Pause; + } else + newadv = 0; + + if (epause->autoneg) + tg3_flag_set(tp, PAUSE_AUTONEG); + else + tg3_flag_clear(tp, PAUSE_AUTONEG); + + if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { + u32 oldadv = phydev->advertising & + (ADVERTISED_Pause | ADVERTISED_Asym_Pause); + if (oldadv != newadv) { + phydev->advertising &= + ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + phydev->advertising |= newadv; + if (phydev->autoneg) { + /* + * Always renegotiate the link to + * inform our link partner of our + * flow control settings, even if the + * flow control is forced. Let + * tg3_adjust_link() do the final + * flow control setup. + */ + return phy_start_aneg(phydev); + } + } + + if (!epause->autoneg) + tg3_setup_flow_control(tp, 0, 0); + } else { + tp->link_config.orig_advertising &= + ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + tp->link_config.orig_advertising |= newadv; + } + } else { + int irq_sync = 0; + + if (netif_running(dev)) { + tg3_netif_stop(tp); + irq_sync = 1; + } + + tg3_full_lock(tp, irq_sync); + + if (epause->autoneg) + tg3_flag_set(tp, PAUSE_AUTONEG); + else + tg3_flag_clear(tp, PAUSE_AUTONEG); + if (epause->rx_pause) + tp->link_config.flowctrl |= FLOW_CTRL_RX; + else + tp->link_config.flowctrl &= ~FLOW_CTRL_RX; + if (epause->tx_pause) + tp->link_config.flowctrl |= FLOW_CTRL_TX; + else + tp->link_config.flowctrl &= ~FLOW_CTRL_TX; + + if (netif_running(dev)) { + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + err = tg3_restart_hw(tp, 1); + if (!err) + tg3_netif_start(tp); + } + + tg3_full_unlock(tp); + } + + return err; +} + +static int tg3_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return TG3_NUM_TEST; + case ETH_SS_STATS: + return TG3_NUM_STATS; + default: + return -EOPNOTSUPP; + } +} + +static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); + break; + case ETH_SS_TEST: + memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys)); + break; + default: + WARN_ON(1); /* we need a WARN() */ + break; + } +} + +static int tg3_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct tg3 *tp = netdev_priv(dev); + + if (!netif_running(tp->dev)) + return -EAGAIN; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 1; /* cycle on/off once per second */ + + case ETHTOOL_ID_ON: + tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | + LED_CTRL_1000MBPS_ON | + LED_CTRL_100MBPS_ON | + LED_CTRL_10MBPS_ON | + LED_CTRL_TRAFFIC_OVERRIDE | + LED_CTRL_TRAFFIC_BLINK | + LED_CTRL_TRAFFIC_LED); + break; + + case ETHTOOL_ID_OFF: + tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE | + LED_CTRL_TRAFFIC_OVERRIDE); + break; + + case ETHTOOL_ID_INACTIVE: + tw32(MAC_LED_CTRL, tp->led_ctrl); + break; + } + + return 0; +} + +static void tg3_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *estats, u64 *tmp_stats) +{ + struct tg3 *tp = netdev_priv(dev); + memcpy(tmp_stats, tg3_get_estats(tp), 
sizeof(tp->estats)); +} + +static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen) +{ + int i; + __be32 *buf; + u32 offset = 0, len = 0; + u32 magic, val; + + if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic)) + return NULL; + + if (magic == TG3_EEPROM_MAGIC) { + for (offset = TG3_NVM_DIR_START; + offset < TG3_NVM_DIR_END; + offset += TG3_NVM_DIRENT_SIZE) { + if (tg3_nvram_read(tp, offset, &val)) + return NULL; + + if ((val >> TG3_NVM_DIRTYPE_SHIFT) == + TG3_NVM_DIRTYPE_EXTVPD) + break; + } + + if (offset != TG3_NVM_DIR_END) { + len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4; + if (tg3_nvram_read(tp, offset + 4, &offset)) + return NULL; + + offset = tg3_nvram_logical_addr(tp, offset); + } + } + + if (!offset || !len) { + offset = TG3_NVM_VPD_OFF; + len = TG3_NVM_VPD_LEN; + } + + buf = kmalloc(len, GFP_KERNEL); + if (buf == NULL) + return NULL; + + if (magic == TG3_EEPROM_MAGIC) { + for (i = 0; i < len; i += 4) { + /* The data is in little-endian format in NVRAM. + * Use the big-endian read routines to preserve + * the byte order as it exists in NVRAM. + */ + if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4])) + goto error; + } + } else { + u8 *ptr; + ssize_t cnt; + unsigned int pos = 0; + + ptr = (u8 *)&buf[0]; + for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) { + cnt = pci_read_vpd(tp->pdev, pos, + len - pos, ptr); + if (cnt == -ETIMEDOUT || cnt == -EINTR) + cnt = 0; + else if (cnt < 0) + goto error; + } + if (pos != len) + goto error; + } + + *vpdlen = len; + + return buf; + +error: + kfree(buf); + return NULL; +} + +#define NVRAM_TEST_SIZE 0x100 +#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14 +#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18 +#define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c +#define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20 +#define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24 +#define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50 +#define NVRAM_SELFBOOT_HW_SIZE 0x20 +#define NVRAM_SELFBOOT_DATA_SIZE 0x1c + +static int tg3_test_nvram(struct tg3 *tp) +{ + u32 csum, magic, len; + __be32 *buf; + int i, j, k, err = 0, size; + + if (tg3_flag(tp, NO_NVRAM)) + return 0; + + if (tg3_nvram_read(tp, 0, &magic) != 0) + return -EIO; + + if (magic == TG3_EEPROM_MAGIC) + size = NVRAM_TEST_SIZE; + else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) { + if ((magic & TG3_EEPROM_SB_FORMAT_MASK) == + TG3_EEPROM_SB_FORMAT_1) { + switch (magic & TG3_EEPROM_SB_REVISION_MASK) { + case TG3_EEPROM_SB_REVISION_0: + size = NVRAM_SELFBOOT_FORMAT1_0_SIZE; + break; + case TG3_EEPROM_SB_REVISION_2: + size = NVRAM_SELFBOOT_FORMAT1_2_SIZE; + break; + case TG3_EEPROM_SB_REVISION_3: + size = NVRAM_SELFBOOT_FORMAT1_3_SIZE; + break; + case TG3_EEPROM_SB_REVISION_4: + size = NVRAM_SELFBOOT_FORMAT1_4_SIZE; + break; + case TG3_EEPROM_SB_REVISION_5: + size = NVRAM_SELFBOOT_FORMAT1_5_SIZE; + break; + case TG3_EEPROM_SB_REVISION_6: + size = NVRAM_SELFBOOT_FORMAT1_6_SIZE; + break; + default: + return -EIO; + } + } else + return 0; + } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) + size = NVRAM_SELFBOOT_HW_SIZE; + else + return -EIO; + + buf = kmalloc(size, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + + err = -EIO; + for (i = 0, j = 0; i < size; i += 4, j++) { + err = tg3_nvram_read_be32(tp, i, &buf[j]); + if (err) + break; + } + if (i < size) + goto out; + + /* Selfboot format */ + magic = be32_to_cpu(buf[0]); + if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == + TG3_EEPROM_MAGIC_FW) { + u8 *buf8 = (u8 *) buf, csum8 = 0; + + if ((magic & TG3_EEPROM_SB_REVISION_MASK) == + TG3_EEPROM_SB_REVISION_2) { 
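+ /* [Editor's note, added annotation: the selfboot images use a plain
+ * additive checksum -- the stored checksum byte is evidently chosen so
+ * that the byte-sum of the whole image is 0 mod 256, so verification
+ * is just
+ *
+ *	u8 sum = 0;
+ *	for (i = 0; i < size; i++)
+ *		sum += buf8[i];
+ *	pass = (sum == 0);
+ *
+ * The rev-2 special case below additionally skips the 4-byte MBA
+ * field at TG3_EEPROM_SB_F1R2_MBA_OFF, which is excluded from the
+ * sum.]
+ */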
+ /* For rev 2, the csum doesn't include the MBA. */ + for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++) + csum8 += buf8[i]; + for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++) + csum8 += buf8[i]; + } else { + for (i = 0; i < size; i++) + csum8 += buf8[i]; + } + + if (csum8 == 0) { + err = 0; + goto out; + } + + err = -EIO; + goto out; + } + + if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == + TG3_EEPROM_MAGIC_HW) { + u8 data[NVRAM_SELFBOOT_DATA_SIZE]; + u8 parity[NVRAM_SELFBOOT_DATA_SIZE]; + u8 *buf8 = (u8 *) buf; + + /* Separate the parity bits and the data bytes. */ + for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) { + if ((i == 0) || (i == 8)) { + int l; + u8 msk; + + for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1) + parity[k++] = buf8[i] & msk; + i++; + } else if (i == 16) { + int l; + u8 msk; + + for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1) + parity[k++] = buf8[i] & msk; + i++; + + for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1) + parity[k++] = buf8[i] & msk; + i++; + } + data[j++] = buf8[i]; + } + + err = -EIO; + for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) { + u8 hw8 = hweight8(data[i]); + + if ((hw8 & 0x1) && parity[i]) + goto out; + else if (!(hw8 & 0x1) && !parity[i]) + goto out; + } + err = 0; + goto out; + } + + err = -EIO; + + /* Bootstrap checksum at offset 0x10 */ + csum = calc_crc((unsigned char *) buf, 0x10); + if (csum != le32_to_cpu(buf[0x10/4])) + goto out; + + /* Manufacturing block starts at offset 0x74, checksum at 0xfc */ + csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88); + if (csum != le32_to_cpu(buf[0xfc/4])) + goto out; + + kfree(buf); + + buf = tg3_vpd_readblock(tp, &len); + if (!buf) + return -ENOMEM; + + i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA); + if (i > 0) { + j = pci_vpd_lrdt_size(&((u8 *)buf)[i]); + if (j < 0) + goto out; + + if (i + PCI_VPD_LRDT_TAG_SIZE + j > len) + goto out; + + i += PCI_VPD_LRDT_TAG_SIZE; + j = pci_vpd_find_info_keyword((u8 *)buf, i, j, + PCI_VPD_RO_KEYWORD_CHKSUM); + if (j > 0) { + u8 csum8 = 0; + + j += PCI_VPD_INFO_FLD_HDR_SIZE; + + for (i = 0; i <= j; i++) + csum8 += ((u8 *)buf)[i]; + + if (csum8) + goto out; + } + } + + err = 0; + +out: + kfree(buf); + return err; +} + +#define TG3_SERDES_TIMEOUT_SEC 2 +#define TG3_COPPER_TIMEOUT_SEC 6 + +static int tg3_test_link(struct tg3 *tp) +{ + int i, max; + + if (!netif_running(tp->dev)) + return -ENODEV; + + if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) + max = TG3_SERDES_TIMEOUT_SEC; + else + max = TG3_COPPER_TIMEOUT_SEC; + + for (i = 0; i < max; i++) { + if (netif_carrier_ok(tp->dev)) + return 0; + + if (msleep_interruptible(1000)) + break; + } + + return -EIO; +} + +/* Only test the commonly used registers */ +static int tg3_test_registers(struct tg3 *tp) +{ + int i, is_5705, is_5750; + u32 offset, read_mask, write_mask, val, save_val, read_val; + static struct { + u16 offset; + u16 flags; +#define TG3_FL_5705 0x1 +#define TG3_FL_NOT_5705 0x2 +#define TG3_FL_NOT_5788 0x4 +#define TG3_FL_NOT_5750 0x8 + u32 read_mask; + u32 write_mask; + } reg_tbl[] = { + /* MAC Control Registers */ + { MAC_MODE, TG3_FL_NOT_5705, + 0x00000000, 0x00ef6f8c }, + { MAC_MODE, TG3_FL_5705, + 0x00000000, 0x01ef6b8c }, + { MAC_STATUS, TG3_FL_NOT_5705, + 0x03800107, 0x00000000 }, + { MAC_STATUS, TG3_FL_5705, + 0x03800100, 0x00000000 }, + { MAC_ADDR_0_HIGH, 0x0000, + 0x00000000, 0x0000ffff }, + { MAC_ADDR_0_LOW, 0x0000, + 0x00000000, 0xffffffff }, + { MAC_RX_MTU_SIZE, 0x0000, + 0x00000000, 0x0000ffff }, + { MAC_TX_MODE, 0x0000, + 0x00000000, 0x00000070 }, + { MAC_TX_LENGTHS, 
0x0000, + 0x00000000, 0x00003fff }, + { MAC_RX_MODE, TG3_FL_NOT_5705, + 0x00000000, 0x000007fc }, + { MAC_RX_MODE, TG3_FL_5705, + 0x00000000, 0x000007dc }, + { MAC_HASH_REG_0, 0x0000, + 0x00000000, 0xffffffff }, + { MAC_HASH_REG_1, 0x0000, + 0x00000000, 0xffffffff }, + { MAC_HASH_REG_2, 0x0000, + 0x00000000, 0xffffffff }, + { MAC_HASH_REG_3, 0x0000, + 0x00000000, 0xffffffff }, + + /* Receive Data and Receive BD Initiator Control Registers. */ + { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705, + 0x00000000, 0x00000003 }, + { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { RCVDBDI_STD_BD+0, 0x0000, + 0x00000000, 0xffffffff }, + { RCVDBDI_STD_BD+4, 0x0000, + 0x00000000, 0xffffffff }, + { RCVDBDI_STD_BD+8, 0x0000, + 0x00000000, 0xffff0002 }, + { RCVDBDI_STD_BD+0xc, 0x0000, + 0x00000000, 0xffffffff }, + + /* Receive BD Initiator Control Registers. */ + { RCVBDI_STD_THRESH, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { RCVBDI_STD_THRESH, TG3_FL_5705, + 0x00000000, 0x000003ff }, + { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + + /* Host Coalescing Control Registers. */ + { HOSTCC_MODE, TG3_FL_NOT_5705, + 0x00000000, 0x00000004 }, + { HOSTCC_MODE, TG3_FL_5705, + 0x00000000, 0x000000f6 }, + { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_RXCOL_TICKS, TG3_FL_5705, + 0x00000000, 0x000003ff }, + { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_TXCOL_TICKS, TG3_FL_5705, + 0x00000000, 0x000003ff }, + { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, + 0x00000000, 0x000000ff }, + { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788, + 0x00000000, 0x000000ff }, + { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, + 0x00000000, 0x000000ff }, + { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788, + 0x00000000, 0x000000ff }, + { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705, + 0x00000000, 0xffffffff }, + { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000, + 0x00000000, 0xffffffff }, + { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000, + 0x00000000, 0xffffffff }, + { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000, + 0xffffffff, 0x00000000 }, + { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000, + 0xffffffff, 0x00000000 }, + + /* Buffer Manager Control Registers. 
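+ * [Editor's note, added annotation: every reg_tbl entry is { offset,
+ * flags, read_mask, write_mask } -- read_mask marks bits expected to
+ * be read-only (they must survive writes of both 0 and all-ones),
+ * write_mask marks read/write bits that must accept both values; the
+ * TG3_FL_* flags gate entries per ASIC family via the is_5705 and
+ * is_5750 and IS_5788 checks in the loop below.]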
*/ + { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750, + 0x00000000, 0x007fff80 }, + { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750, + 0x00000000, 0x007fffff }, + { BUFMGR_MB_RDMA_LOW_WATER, 0x0000, + 0x00000000, 0x0000003f }, + { BUFMGR_MB_MACRX_LOW_WATER, 0x0000, + 0x00000000, 0x000001ff }, + { BUFMGR_MB_HIGH_WATER, 0x0000, + 0x00000000, 0x000001ff }, + { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705, + 0xffffffff, 0x00000000 }, + { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705, + 0xffffffff, 0x00000000 }, + + /* Mailbox Registers */ + { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000, + 0x00000000, 0x000001ff }, + { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705, + 0x00000000, 0x000001ff }, + { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000, + 0x00000000, 0x000007ff }, + { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000, + 0x00000000, 0x000001ff }, + + { 0xffff, 0x0000, 0x00000000, 0x00000000 }, + }; + + is_5705 = is_5750 = 0; + if (tg3_flag(tp, 5705_PLUS)) { + is_5705 = 1; + if (tg3_flag(tp, 5750_PLUS)) + is_5750 = 1; + } + + for (i = 0; reg_tbl[i].offset != 0xffff; i++) { + if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705)) + continue; + + if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705)) + continue; + + if (tg3_flag(tp, IS_5788) && + (reg_tbl[i].flags & TG3_FL_NOT_5788)) + continue; + + if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750)) + continue; + + offset = (u32) reg_tbl[i].offset; + read_mask = reg_tbl[i].read_mask; + write_mask = reg_tbl[i].write_mask; + + /* Save the original register content */ + save_val = tr32(offset); + + /* Determine the read-only value. */ + read_val = save_val & read_mask; + + /* Write zero to the register, then make sure the read-only bits + * are not changed and the read/write bits are all zeros. + */ + tw32(offset, 0); + + val = tr32(offset); + + /* Test the read-only and read/write bits. */ + if (((val & read_mask) != read_val) || (val & write_mask)) + goto out; + + /* Write ones to all the bits defined by RdMask and WrMask, then + * make sure the read-only bits are not changed and the + * read/write bits are all ones. + */ + tw32(offset, read_mask | write_mask); + + val = tr32(offset); + + /* Test the read-only bits. */ + if ((val & read_mask) != read_val) + goto out; + + /* Test the read/write bits. 
*/ + if ((val & write_mask) != write_mask) + goto out; + + tw32(offset, save_val); + } + + return 0; + +out: + if (netif_msg_hw(tp)) + netdev_err(tp->dev, + "Register test failed at offset %x\n", offset); + tw32(offset, save_val); + return -EIO; +} + +static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len) +{ + static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a }; + int i; + u32 j; + + for (i = 0; i < ARRAY_SIZE(test_pattern); i++) { + for (j = 0; j < len; j += 4) { + u32 val; + + tg3_write_mem(tp, offset + j, test_pattern[i]); + tg3_read_mem(tp, offset + j, &val); + if (val != test_pattern[i]) + return -EIO; + } + } + return 0; +} + +static int tg3_test_memory(struct tg3 *tp) +{ + static struct mem_entry { + u32 offset; + u32 len; + } mem_tbl_570x[] = { + { 0x00000000, 0x00b50}, + { 0x00002000, 0x1c000}, + { 0xffffffff, 0x00000} + }, mem_tbl_5705[] = { + { 0x00000100, 0x0000c}, + { 0x00000200, 0x00008}, + { 0x00004000, 0x00800}, + { 0x00006000, 0x01000}, + { 0x00008000, 0x02000}, + { 0x00010000, 0x0e000}, + { 0xffffffff, 0x00000} + }, mem_tbl_5755[] = { + { 0x00000200, 0x00008}, + { 0x00004000, 0x00800}, + { 0x00006000, 0x00800}, + { 0x00008000, 0x02000}, + { 0x00010000, 0x0c000}, + { 0xffffffff, 0x00000} + }, mem_tbl_5906[] = { + { 0x00000200, 0x00008}, + { 0x00004000, 0x00400}, + { 0x00006000, 0x00400}, + { 0x00008000, 0x01000}, + { 0x00010000, 0x01000}, + { 0xffffffff, 0x00000} + }, mem_tbl_5717[] = { + { 0x00000200, 0x00008}, + { 0x00010000, 0x0a000}, + { 0x00020000, 0x13c00}, + { 0xffffffff, 0x00000} + }, mem_tbl_57765[] = { + { 0x00000200, 0x00008}, + { 0x00004000, 0x00800}, + { 0x00006000, 0x09800}, + { 0x00010000, 0x0a000}, + { 0xffffffff, 0x00000} + }; + struct mem_entry *mem_tbl; + int err = 0; + int i; + + if (tg3_flag(tp, 5717_PLUS)) + mem_tbl = mem_tbl_5717; + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + mem_tbl = mem_tbl_57765; + else if (tg3_flag(tp, 5755_PLUS)) + mem_tbl = mem_tbl_5755; + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + mem_tbl = mem_tbl_5906; + else if (tg3_flag(tp, 5705_PLUS)) + mem_tbl = mem_tbl_5705; + else + mem_tbl = mem_tbl_570x; + + for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { + err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len); + if (err) + break; + } + + return err; +} + +#define TG3_TSO_MSS 500 + +#define TG3_TSO_IP_HDR_LEN 20 +#define TG3_TSO_TCP_HDR_LEN 20 +#define TG3_TSO_TCP_OPT_LEN 12 + +static const u8 tg3_tso_header[] = { +0x08, 0x00, +0x45, 0x00, 0x00, 0x00, +0x00, 0x00, 0x40, 0x00, +0x40, 0x06, 0x00, 0x00, +0x0a, 0x00, 0x00, 0x01, +0x0a, 0x00, 0x00, 0x02, +0x0d, 0x00, 0xe0, 0x00, +0x00, 0x00, 0x01, 0x00, +0x00, 0x00, 0x02, 0x00, +0x80, 0x10, 0x10, 0x00, +0x14, 0x09, 0x00, 0x00, +0x01, 0x01, 0x08, 0x0a, +0x11, 0x11, 0x11, 0x11, +0x11, 0x11, 0x11, 0x11, +}; + +static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback) +{ + u32 rx_start_idx, rx_idx, tx_idx, opaque_key; + u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val; + u32 budget; + struct sk_buff *skb, *rx_skb; + u8 *tx_data; + dma_addr_t map; + int num_pkts, tx_len, rx_len, i, err; + struct tg3_rx_buffer_desc *desc; + struct tg3_napi *tnapi, *rnapi; + struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring; + + tnapi = &tp->napi[0]; + rnapi = &tp->napi[0]; + if (tp->irq_cnt > 1) { + if (tg3_flag(tp, ENABLE_RSS)) + rnapi = &tp->napi[1]; + if (tg3_flag(tp, ENABLE_TSS)) + tnapi = &tp->napi[1]; + } + coal_now = tnapi->coal_now | rnapi->coal_now; + + err = -EIO; + + tx_len = pktsz; + 
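+ /* [Editor's note, added annotation: tg3_do_mem_test() above uses the
+ * classic write-then-read-back sweep with the patterns 0x00000000,
+ * 0xffffffff and 0xaa55a55a (all-clear, all-set, alternating) to
+ * catch stuck-at and shorted data lines; the mem_tbl_* tables list
+ * the per-ASIC { offset, len } windows of on-chip memory that are
+ * safe to overwrite during the offline self-test.]
+ */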
skb = netdev_alloc_skb(tp->dev, tx_len); + if (!skb) + return -ENOMEM; + + tx_data = skb_put(skb, tx_len); + memcpy(tx_data, tp->dev->dev_addr, 6); + memset(tx_data + 6, 0x0, 8); + + tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN); + + if (tso_loopback) { + struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN]; + + u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN + + TG3_TSO_TCP_OPT_LEN; + + memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header, + sizeof(tg3_tso_header)); + mss = TG3_TSO_MSS; + + val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header); + num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS); + + /* Set the total length field in the IP header */ + iph->tot_len = htons((u16)(mss + hdr_len)); + + base_flags = (TXD_FLAG_CPU_PRE_DMA | + TXD_FLAG_CPU_POST_DMA); + + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) { + struct tcphdr *th; + val = ETH_HLEN + TG3_TSO_IP_HDR_LEN; + th = (struct tcphdr *)&tx_data[val]; + th->check = 0; + } else + base_flags |= TXD_FLAG_TCPUDP_CSUM; + + if (tg3_flag(tp, HW_TSO_3)) { + mss |= (hdr_len & 0xc) << 12; + if (hdr_len & 0x10) + base_flags |= 0x00000010; + base_flags |= (hdr_len & 0x3e0) << 5; + } else if (tg3_flag(tp, HW_TSO_2)) + mss |= hdr_len << 9; + else if (tg3_flag(tp, HW_TSO_1) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + mss |= (TG3_TSO_TCP_OPT_LEN << 9); + } else { + base_flags |= (TG3_TSO_TCP_OPT_LEN << 10); + } + + data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header); + } else { + num_pkts = 1; + data_off = ETH_HLEN; + } + + for (i = data_off; i < tx_len; i++) + tx_data[i] = (u8) (i & 0xff); + + map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(tp->pdev, map)) { + dev_kfree_skb(skb); + return -EIO; + } + + val = tnapi->tx_prod; + tnapi->tx_buffers[val].skb = skb; + dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map); + + tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | + rnapi->coal_now); + + udelay(10); + + rx_start_idx = rnapi->hw_status->idx[0].rx_producer; + + budget = tg3_tx_avail(tnapi); + if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len, + base_flags | TXD_FLAG_END, mss, 0)) { + tnapi->tx_buffers[val].skb = NULL; + dev_kfree_skb(skb); + return -EIO; + } + + tnapi->tx_prod++; + + tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod); + tr32_mailbox(tnapi->prodmbox); + + udelay(10); + + /* 350 usec to allow enough time on some 10/100 Mbps devices. 
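+ * [Editor's note: the figure comes from the polling loop below --
+ * up to 35 iterations with udelay(10) each, i.e. 35 * 10us = 350us.]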
*/ + for (i = 0; i < 35; i++) { + tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | + coal_now); + + udelay(10); + + tx_idx = tnapi->hw_status->idx[0].tx_consumer; + rx_idx = rnapi->hw_status->idx[0].rx_producer; + if ((tx_idx == tnapi->tx_prod) && + (rx_idx == (rx_start_idx + num_pkts))) + break; + } + + tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0); + dev_kfree_skb(skb); + + if (tx_idx != tnapi->tx_prod) + goto out; + + if (rx_idx != rx_start_idx + num_pkts) + goto out; + + val = data_off; + while (rx_idx != rx_start_idx) { + desc = &rnapi->rx_rcb[rx_start_idx++]; + desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; + opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; + + if ((desc->err_vlan & RXD_ERR_MASK) != 0 && + (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) + goto out; + + rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) + - ETH_FCS_LEN; + + if (!tso_loopback) { + if (rx_len != tx_len) + goto out; + + if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) { + if (opaque_key != RXD_OPAQUE_RING_STD) + goto out; + } else { + if (opaque_key != RXD_OPAQUE_RING_JUMBO) + goto out; + } + } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && + (desc->ip_tcp_csum & RXD_TCPCSUM_MASK) + >> RXD_TCPCSUM_SHIFT != 0xffff) { + goto out; + } + + if (opaque_key == RXD_OPAQUE_RING_STD) { + rx_skb = tpr->rx_std_buffers[desc_idx].skb; + map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], + mapping); + } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { + rx_skb = tpr->rx_jmb_buffers[desc_idx].skb; + map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx], + mapping); + } else + goto out; + + pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, + PCI_DMA_FROMDEVICE); + + for (i = data_off; i < rx_len; i++, val++) { + if (*(rx_skb->data + i) != (u8) (val & 0xff)) + goto out; + } + } + + err = 0; + + /* tg3_free_rings will unmap and free the rx_skb */ +out: + return err; +} + +#define TG3_STD_LOOPBACK_FAILED 1 +#define TG3_JMB_LOOPBACK_FAILED 2 +#define TG3_TSO_LOOPBACK_FAILED 4 +#define TG3_LOOPBACK_FAILED \ + (TG3_STD_LOOPBACK_FAILED | \ + TG3_JMB_LOOPBACK_FAILED | \ + TG3_TSO_LOOPBACK_FAILED) + +static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk) +{ + int err = -EIO; + u32 eee_cap; + + eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP; + tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; + + if (!netif_running(tp->dev)) { + data[0] = TG3_LOOPBACK_FAILED; + data[1] = TG3_LOOPBACK_FAILED; + if (do_extlpbk) + data[2] = TG3_LOOPBACK_FAILED; + goto done; + } + + err = tg3_reset_hw(tp, 1); + if (err) { + data[0] = TG3_LOOPBACK_FAILED; + data[1] = TG3_LOOPBACK_FAILED; + if (do_extlpbk) + data[2] = TG3_LOOPBACK_FAILED; + goto done; + } + + if (tg3_flag(tp, ENABLE_RSS)) { + int i; + + /* Reroute all rx packets to the 1st queue */ + for (i = MAC_RSS_INDIR_TBL_0; + i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4) + tw32(i, 0x0); + } + + /* HW errata - mac loopback fails in some cases on 5780. + * Normal traffic and PHY loopback are not affected by + * errata. Also, the MAC loopback test is deprecated for + * all newer ASIC revisions. 
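+ * [Editor's note, added annotation: the test therefore runs up to
+ * three stages -- MAC loopback (older ASICs only), internal PHY
+ * loopback, and external loopback when ETH_TEST_FL_EXTERNAL_LB is
+ * requested -- each tried with standard and jumbo frames (plus a TSO
+ * frame in the PHY-level stages), ORing the
+ * TG3_STD/TSO/JMB_LOOPBACK_FAILED bits into data[0], data[1] and
+ * data[2] respectively.]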
+ */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 && + !tg3_flag(tp, CPMU_PRESENT)) { + tg3_mac_loopback(tp, true); + + if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) + data[0] |= TG3_STD_LOOPBACK_FAILED; + + if (tg3_flag(tp, JUMBO_RING_ENABLE) && + tg3_run_loopback(tp, 9000 + ETH_HLEN, false)) + data[0] |= TG3_JMB_LOOPBACK_FAILED; + + tg3_mac_loopback(tp, false); + } + + if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && + !tg3_flag(tp, USE_PHYLIB)) { + int i; + + tg3_phy_lpbk_set(tp, 0, false); + + /* Wait for link */ + for (i = 0; i < 100; i++) { + if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) + break; + mdelay(1); + } + + if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) + data[1] |= TG3_STD_LOOPBACK_FAILED; + if (tg3_flag(tp, TSO_CAPABLE) && + tg3_run_loopback(tp, ETH_FRAME_LEN, true)) + data[1] |= TG3_TSO_LOOPBACK_FAILED; + if (tg3_flag(tp, JUMBO_RING_ENABLE) && + tg3_run_loopback(tp, 9000 + ETH_HLEN, false)) + data[1] |= TG3_JMB_LOOPBACK_FAILED; + + if (do_extlpbk) { + tg3_phy_lpbk_set(tp, 0, true); + + /* All link indications report up, but the hardware + * isn't really ready for about 20 msec. Double it + * to be sure. + */ + mdelay(40); + + if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) + data[2] |= TG3_STD_LOOPBACK_FAILED; + if (tg3_flag(tp, TSO_CAPABLE) && + tg3_run_loopback(tp, ETH_FRAME_LEN, true)) + data[2] |= TG3_TSO_LOOPBACK_FAILED; + if (tg3_flag(tp, JUMBO_RING_ENABLE) && + tg3_run_loopback(tp, 9000 + ETH_HLEN, false)) + data[2] |= TG3_JMB_LOOPBACK_FAILED; + } + + /* Re-enable gphy autopowerdown. */ + if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) + tg3_phy_toggle_apd(tp, true); + } + + err = (data[0] | data[1] | data[2]) ? -EIO : 0; + +done: + tp->phy_flags |= eee_cap; + + return err; +} + +static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, + u64 *data) +{ + struct tg3 *tp = netdev_priv(dev); + bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB; + + if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && + tg3_power_up(tp)) { + etest->flags |= ETH_TEST_FL_FAILED; + memset(data, 1, sizeof(u64) * TG3_NUM_TEST); + return; + } + + memset(data, 0, sizeof(u64) * TG3_NUM_TEST); + + if (tg3_test_nvram(tp) != 0) { + etest->flags |= ETH_TEST_FL_FAILED; + data[0] = 1; + } + if (!doextlpbk && tg3_test_link(tp)) { + etest->flags |= ETH_TEST_FL_FAILED; + data[1] = 1; + } + if (etest->flags & ETH_TEST_FL_OFFLINE) { + int err, err2 = 0, irq_sync = 0; + + if (netif_running(dev)) { + tg3_phy_stop(tp); + tg3_netif_stop(tp); + irq_sync = 1; + } + + tg3_full_lock(tp, irq_sync); + + tg3_halt(tp, RESET_KIND_SUSPEND, 1); + err = tg3_nvram_lock(tp); + tg3_halt_cpu(tp, RX_CPU_BASE); + if (!tg3_flag(tp, 5705_PLUS)) + tg3_halt_cpu(tp, TX_CPU_BASE); + if (!err) + tg3_nvram_unlock(tp); + + if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) + tg3_phy_reset(tp); + + if (tg3_test_registers(tp) != 0) { + etest->flags |= ETH_TEST_FL_FAILED; + data[2] = 1; + } + + if (tg3_test_memory(tp) != 0) { + etest->flags |= ETH_TEST_FL_FAILED; + data[3] = 1; + } + + if (doextlpbk) + etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; + + if (tg3_test_loopback(tp, &data[4], doextlpbk)) + etest->flags |= ETH_TEST_FL_FAILED; + + tg3_full_unlock(tp); + + if (tg3_test_interrupt(tp) != 0) { + etest->flags |= ETH_TEST_FL_FAILED; + data[7] = 1; + } + + tg3_full_lock(tp, 0); + + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + if (netif_running(dev)) { + tg3_flag_set(tp, INIT_COMPLETE); + err2 = tg3_restart_hw(tp, 1); + if (!err2) + tg3_netif_start(tp); + } + + tg3_full_unlock(tp); + + if (irq_sync && !err2) + 
tg3_phy_start(tp); + } + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) + tg3_power_down(tp); + +} + +static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct mii_ioctl_data *data = if_mii(ifr); + struct tg3 *tp = netdev_priv(dev); + int err; + + if (tg3_flag(tp, USE_PHYLIB)) { + struct phy_device *phydev; + if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) + return -EAGAIN; + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + return phy_mii_ioctl(phydev, ifr, cmd); + } + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = tp->phy_addr; + + /* fallthru */ + case SIOCGMIIREG: { + u32 mii_regval; + + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) + break; /* We have no PHY */ + + if (!netif_running(dev)) + return -EAGAIN; + + spin_lock_bh(&tp->lock); + err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval); + spin_unlock_bh(&tp->lock); + + data->val_out = mii_regval; + + return err; + } + + case SIOCSMIIREG: + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) + break; /* We have no PHY */ + + if (!netif_running(dev)) + return -EAGAIN; + + spin_lock_bh(&tp->lock); + err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in); + spin_unlock_bh(&tp->lock); + + return err; + + default: + /* do nothing */ + break; + } + return -EOPNOTSUPP; +} + +static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +{ + struct tg3 *tp = netdev_priv(dev); + + memcpy(ec, &tp->coal, sizeof(*ec)); + return 0; +} + +static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +{ + struct tg3 *tp = netdev_priv(dev); + u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0; + u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0; + + if (!tg3_flag(tp, 5705_PLUS)) { + max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT; + max_txcoal_tick_int = MAX_TXCOAL_TICK_INT; + max_stat_coal_ticks = MAX_STAT_COAL_TICKS; + min_stat_coal_ticks = MIN_STAT_COAL_TICKS; + } + + if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) || + (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) || + (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) || + (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) || + (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) || + (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) || + (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) || + (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) || + (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) || + (ec->stats_block_coalesce_usecs < min_stat_coal_ticks)) + return -EINVAL; + + /* No rx interrupts will be generated if both are zero */ + if ((ec->rx_coalesce_usecs == 0) && + (ec->rx_max_coalesced_frames == 0)) + return -EINVAL; + + /* No tx interrupts will be generated if both are zero */ + if ((ec->tx_coalesce_usecs == 0) && + (ec->tx_max_coalesced_frames == 0)) + return -EINVAL; + + /* Only copy relevant parameters, ignore all others. 
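+ * [Editor's note, added annotation: "relevant" means the nine fields
+ * copied below -- rx/tx coalesce usecs, rx/tx max coalesced frames,
+ * their four _irq variants, and stats_block_coalesce_usecs.  The
+ * remaining struct ethtool_coalesce members, such as the adaptive
+ * coalescing knobs, are silently ignored rather than rejected.]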
*/ + tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs; + tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs; + tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames; + tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames; + tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq; + tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq; + tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq; + tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq; + tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs; + + if (netif_running(dev)) { + tg3_full_lock(tp, 0); + __tg3_set_coalesce(tp, &tp->coal); + tg3_full_unlock(tp); + } + return 0; +} + +static const struct ethtool_ops tg3_ethtool_ops = { + .get_settings = tg3_get_settings, + .set_settings = tg3_set_settings, + .get_drvinfo = tg3_get_drvinfo, + .get_regs_len = tg3_get_regs_len, + .get_regs = tg3_get_regs, + .get_wol = tg3_get_wol, + .set_wol = tg3_set_wol, + .get_msglevel = tg3_get_msglevel, + .set_msglevel = tg3_set_msglevel, + .nway_reset = tg3_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = tg3_get_eeprom_len, + .get_eeprom = tg3_get_eeprom, + .set_eeprom = tg3_set_eeprom, + .get_ringparam = tg3_get_ringparam, + .set_ringparam = tg3_set_ringparam, + .get_pauseparam = tg3_get_pauseparam, + .set_pauseparam = tg3_set_pauseparam, + .self_test = tg3_self_test, + .get_strings = tg3_get_strings, + .set_phys_id = tg3_set_phys_id, + .get_ethtool_stats = tg3_get_ethtool_stats, + .get_coalesce = tg3_get_coalesce, + .set_coalesce = tg3_set_coalesce, + .get_sset_count = tg3_get_sset_count, +}; + +static void __devinit tg3_get_eeprom_size(struct tg3 *tp) +{ + u32 cursize, val, magic; + + tp->nvram_size = EEPROM_CHIP_SIZE; + + if (tg3_nvram_read(tp, 0, &magic) != 0) + return; + + if ((magic != TG3_EEPROM_MAGIC) && + ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) && + ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW)) + return; + + /* + * Size the chip by reading offsets at increasing powers of two. + * When we encounter our validation signature, we know the addressing + * has wrapped around, and thus have our chip size. + */ + cursize = 0x10; + + while (cursize < tp->nvram_size) { + if (tg3_nvram_read(tp, cursize, &val) != 0) + return; + + if (val == magic) + break; + + cursize <<= 1; + } + + tp->nvram_size = cursize; +} + +static void __devinit tg3_get_nvram_size(struct tg3 *tp) +{ + u32 val; + + if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0) + return; + + /* Selfboot format */ + if (val != TG3_EEPROM_MAGIC) { + tg3_get_eeprom_size(tp); + return; + } + + if (tg3_nvram_read(tp, 0xf0, &val) == 0) { + if (val != 0) { + /* This is confusing. We want to operate on the + * 16-bit value at offset 0xf2. The tg3_nvram_read() + * call will read from NVRAM and byteswap the data + * according to the byteswapping settings for all + * other register accesses. This ensures the data we + * want will always reside in the lower 16-bits. + * However, the data in NVRAM is in LE format, which + * means the data from the NVRAM read will always be + * opposite the endianness of the CPU. The 16-bit + * byteswap then brings the data to CPU endianness. 
+ */ + tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024; + return; + } + } + tp->nvram_size = TG3_NVRAM_SIZE_512KB; +} + +static void __devinit tg3_get_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1; + + nvcfg1 = tr32(NVRAM_CFG1); + if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) { + tg3_flag_set(tp, FLASH); + } else { + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || + tg3_flag(tp, 5780_CLASS)) { + switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) { + case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; + tg3_flag_set(tp, NVRAM_BUFFERED); + break; + case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE; + break; + case FLASH_VENDOR_ATMEL_EEPROM: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + tg3_flag_set(tp, NVRAM_BUFFERED); + break; + case FLASH_VENDOR_ST: + tp->nvram_jedecnum = JEDEC_ST; + tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE; + tg3_flag_set(tp, NVRAM_BUFFERED); + break; + case FLASH_VENDOR_SAIFUN: + tp->nvram_jedecnum = JEDEC_SAIFUN; + tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE; + break; + case FLASH_VENDOR_SST_SMALL: + case FLASH_VENDOR_SST_LARGE: + tp->nvram_jedecnum = JEDEC_SST; + tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE; + break; + } + } else { + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; + tg3_flag_set(tp, NVRAM_BUFFERED); + } +} + +static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1) +{ + switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) { + case FLASH_5752PAGE_SIZE_256: + tp->nvram_pagesize = 256; + break; + case FLASH_5752PAGE_SIZE_512: + tp->nvram_pagesize = 512; + break; + case FLASH_5752PAGE_SIZE_1K: + tp->nvram_pagesize = 1024; + break; + case FLASH_5752PAGE_SIZE_2K: + tp->nvram_pagesize = 2048; + break; + case FLASH_5752PAGE_SIZE_4K: + tp->nvram_pagesize = 4096; + break; + case FLASH_5752PAGE_SIZE_264: + tp->nvram_pagesize = 264; + break; + case FLASH_5752PAGE_SIZE_528: + tp->nvram_pagesize = 528; + break; + } +} + +static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1; + + nvcfg1 = tr32(NVRAM_CFG1); + + /* NVRAM protection for TPM */ + if (nvcfg1 & (1 << 27)) + tg3_flag_set(tp, PROTECTED_NVRAM); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: + case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + break; + case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + break; + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + break; + } + + if (tg3_flag(tp, FLASH)) { + tg3_nvram_get_pagesize(tp, nvcfg1); + } else { + /* For eeprom, set pagesize to maximum eeprom size */ + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + } +} + +static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1, protect = 0; + + nvcfg1 = tr32(NVRAM_CFG1); + + /* NVRAM protection for TPM */ + if (nvcfg1 & (1 << 27)) { + tg3_flag_set(tp, PROTECTED_NVRAM); + protect = 1; + } + + nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; + switch 
(nvcfg1) { + case FLASH_5755VENDOR_ATMEL_FLASH_1: + case FLASH_5755VENDOR_ATMEL_FLASH_2: + case FLASH_5755VENDOR_ATMEL_FLASH_3: + case FLASH_5755VENDOR_ATMEL_FLASH_5: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tp->nvram_pagesize = 264; + if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 || + nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5) + tp->nvram_size = (protect ? 0x3e200 : + TG3_NVRAM_SIZE_512KB); + else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2) + tp->nvram_size = (protect ? 0x1f200 : + TG3_NVRAM_SIZE_256KB); + else + tp->nvram_size = (protect ? 0x1f200 : + TG3_NVRAM_SIZE_128KB); + break; + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tp->nvram_pagesize = 256; + if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10) + tp->nvram_size = (protect ? + TG3_NVRAM_SIZE_64KB : + TG3_NVRAM_SIZE_128KB); + else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20) + tp->nvram_size = (protect ? + TG3_NVRAM_SIZE_64KB : + TG3_NVRAM_SIZE_256KB); + else + tp->nvram_size = (protect ? + TG3_NVRAM_SIZE_128KB : + TG3_NVRAM_SIZE_512KB); + break; + } +} + +static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1; + + nvcfg1 = tr32(NVRAM_CFG1); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ: + case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: + case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ: + case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + break; + case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: + case FLASH_5755VENDOR_ATMEL_FLASH_1: + case FLASH_5755VENDOR_ATMEL_FLASH_2: + case FLASH_5755VENDOR_ATMEL_FLASH_3: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tp->nvram_pagesize = 264; + break; + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tp->nvram_pagesize = 256; + break; + } +} + +static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1, protect = 0; + + nvcfg1 = tr32(NVRAM_CFG1); + + /* NVRAM protection for TPM */ + if (nvcfg1 & (1 << 27)) { + tg3_flag_set(tp, PROTECTED_NVRAM); + protect = 1; + } + + nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; + switch (nvcfg1) { + case FLASH_5761VENDOR_ATMEL_ADB021D: + case FLASH_5761VENDOR_ATMEL_ADB041D: + case FLASH_5761VENDOR_ATMEL_ADB081D: + case FLASH_5761VENDOR_ATMEL_ADB161D: + case FLASH_5761VENDOR_ATMEL_MDB021D: + case FLASH_5761VENDOR_ATMEL_MDB041D: + case FLASH_5761VENDOR_ATMEL_MDB081D: + case FLASH_5761VENDOR_ATMEL_MDB161D: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); + tp->nvram_pagesize = 256; + break; + case FLASH_5761VENDOR_ST_A_M45PE20: + case FLASH_5761VENDOR_ST_A_M45PE40: + case FLASH_5761VENDOR_ST_A_M45PE80: + case FLASH_5761VENDOR_ST_A_M45PE16: + case FLASH_5761VENDOR_ST_M_M45PE20: + case FLASH_5761VENDOR_ST_M_M45PE40: + case FLASH_5761VENDOR_ST_M_M45PE80: + case FLASH_5761VENDOR_ST_M_M45PE16: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tp->nvram_pagesize = 256; + 
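
The size selection in this function depends on two inputs: the vendor strap in NVRAM_CFG1 and the TPM protect bit (bit 27). When protect is set, the driver reports a reduced usable window (0x3e200 or 0x1f200) rather than the part's nominal capacity. A minimal standalone sketch of that pattern follows; the helper name and the reserved_tail parameter are hypothetical illustrations, not the driver's constants.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper mirroring the protect-aware sizing above;
 * reserved_tail is a made-up parameter, not a driver constant,
 * and callers are assumed to pass a tail smaller than the part.
 */
static uint32_t usable_nvram_size(uint32_t part_size, bool protect,
                                  uint32_t reserved_tail)
{
        /* With the TPM protect strap (NVRAM_CFG1 bit 27) set, the
         * top of the part is locked out, so report only the open
         * window below the reserved region.
         */
        return protect ? part_size - reserved_tail : part_size;
}
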
break; + } + + if (protect) { + tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT); + } else { + switch (nvcfg1) { + case FLASH_5761VENDOR_ATMEL_ADB161D: + case FLASH_5761VENDOR_ATMEL_MDB161D: + case FLASH_5761VENDOR_ST_A_M45PE16: + case FLASH_5761VENDOR_ST_M_M45PE16: + tp->nvram_size = TG3_NVRAM_SIZE_2MB; + break; + case FLASH_5761VENDOR_ATMEL_ADB081D: + case FLASH_5761VENDOR_ATMEL_MDB081D: + case FLASH_5761VENDOR_ST_A_M45PE80: + case FLASH_5761VENDOR_ST_M_M45PE80: + tp->nvram_size = TG3_NVRAM_SIZE_1MB; + break; + case FLASH_5761VENDOR_ATMEL_ADB041D: + case FLASH_5761VENDOR_ATMEL_MDB041D: + case FLASH_5761VENDOR_ST_A_M45PE40: + case FLASH_5761VENDOR_ST_M_M45PE40: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + case FLASH_5761VENDOR_ATMEL_ADB021D: + case FLASH_5761VENDOR_ATMEL_MDB021D: + case FLASH_5761VENDOR_ST_A_M45PE20: + case FLASH_5761VENDOR_ST_M_M45PE20: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + } + } +} + +static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp) +{ + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; +} + +static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1; + + nvcfg1 = tr32(NVRAM_CFG1); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: + case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + return; + case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: + case FLASH_57780VENDOR_ATMEL_AT45DB011D: + case FLASH_57780VENDOR_ATMEL_AT45DB011B: + case FLASH_57780VENDOR_ATMEL_AT45DB021D: + case FLASH_57780VENDOR_ATMEL_AT45DB021B: + case FLASH_57780VENDOR_ATMEL_AT45DB041D: + case FLASH_57780VENDOR_ATMEL_AT45DB041B: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: + case FLASH_57780VENDOR_ATMEL_AT45DB011D: + case FLASH_57780VENDOR_ATMEL_AT45DB011B: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + case FLASH_57780VENDOR_ATMEL_AT45DB021D: + case FLASH_57780VENDOR_ATMEL_AT45DB021B: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + case FLASH_57780VENDOR_ATMEL_AT45DB041D: + case FLASH_57780VENDOR_ATMEL_AT45DB041B: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + } + break; + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5752VENDOR_ST_M45PE10: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + case FLASH_5752VENDOR_ST_M45PE20: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + } + break; + default: + tg3_flag_set(tp, NO_NVRAM); + return; + } + + tg3_nvram_get_pagesize(tp, nvcfg1); + if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); +} + + +static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1; + + nvcfg1 = tr32(NVRAM_CFG1); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5717VENDOR_ATMEL_EEPROM: + case FLASH_5717VENDOR_MICRO_EEPROM: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + 
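
tg3_nvram_get_pagesize() earlier in this hunk is a pure table decode from the NVRAM_CFG1 strap field to a page size in bytes. The odd 264- and 528-byte entries are Atmel DataFlash native pages (256+8 and 512+16 bytes), which is why callers set NO_NVRAM_ADDR_TRANS only when the decoded size is a flat power of two: DataFlash parts need the logical-to-physical address translation done elsewhere in the driver. A sketch of the same decode, assuming a dense strap encoding rather than the driver's FLASH_5752PAGE_SIZE_* mask values:

#include <stdint.h>

/* Hypothetical dense strap encoding; the real driver switches on
 * FLASH_5752PAGE_SIZE_* constants extracted from NVRAM_CFG1.
 */
static const uint32_t pagesize_by_strap[] = {
        256, 512, 1024, 2048, 4096, 264, 528
};

static uint32_t decode_pagesize(uint32_t strap)
{
        if (strap >= sizeof(pagesize_by_strap) / sizeof(pagesize_by_strap[0]))
                return 0; /* unknown strap: caller must treat as no NVRAM */
        return pagesize_by_strap[strap];
}

A table keeps the decode declarative: adding a new flash part is one array entry, and an out-of-range strap fails closed rather than picking an arbitrary page size.
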
tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + return; + case FLASH_5717VENDOR_ATMEL_MDB011D: + case FLASH_5717VENDOR_ATMEL_ADB011B: + case FLASH_5717VENDOR_ATMEL_ADB011D: + case FLASH_5717VENDOR_ATMEL_MDB021D: + case FLASH_5717VENDOR_ATMEL_ADB021B: + case FLASH_5717VENDOR_ATMEL_ADB021D: + case FLASH_5717VENDOR_ATMEL_45USPT: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5717VENDOR_ATMEL_MDB021D: + /* Detect size with tg3_nvram_get_size() */ + break; + case FLASH_5717VENDOR_ATMEL_ADB021B: + case FLASH_5717VENDOR_ATMEL_ADB021D: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + default: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + } + break; + case FLASH_5717VENDOR_ST_M_M25PE10: + case FLASH_5717VENDOR_ST_A_M25PE10: + case FLASH_5717VENDOR_ST_M_M45PE10: + case FLASH_5717VENDOR_ST_A_M45PE10: + case FLASH_5717VENDOR_ST_M_M25PE20: + case FLASH_5717VENDOR_ST_A_M25PE20: + case FLASH_5717VENDOR_ST_M_M45PE20: + case FLASH_5717VENDOR_ST_A_M45PE20: + case FLASH_5717VENDOR_ST_25USPT: + case FLASH_5717VENDOR_ST_45USPT: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5717VENDOR_ST_M_M25PE20: + case FLASH_5717VENDOR_ST_M_M45PE20: + /* Detect size with tg3_nvram_get_size() */ + break; + case FLASH_5717VENDOR_ST_A_M25PE20: + case FLASH_5717VENDOR_ST_A_M45PE20: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + default: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + } + break; + default: + tg3_flag_set(tp, NO_NVRAM); + return; + } + + tg3_nvram_get_pagesize(tp, nvcfg1); + if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); +} + +static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1, nvmpinstrp; + + nvcfg1 = tr32(NVRAM_CFG1); + nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK; + + switch (nvmpinstrp) { + case FLASH_5720_EEPROM_HD: + case FLASH_5720_EEPROM_LD: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + if (nvmpinstrp == FLASH_5720_EEPROM_HD) + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + else + tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE; + return; + case FLASH_5720VENDOR_M_ATMEL_DB011D: + case FLASH_5720VENDOR_A_ATMEL_DB011B: + case FLASH_5720VENDOR_A_ATMEL_DB011D: + case FLASH_5720VENDOR_M_ATMEL_DB021D: + case FLASH_5720VENDOR_A_ATMEL_DB021B: + case FLASH_5720VENDOR_A_ATMEL_DB021D: + case FLASH_5720VENDOR_M_ATMEL_DB041D: + case FLASH_5720VENDOR_A_ATMEL_DB041B: + case FLASH_5720VENDOR_A_ATMEL_DB041D: + case FLASH_5720VENDOR_M_ATMEL_DB081D: + case FLASH_5720VENDOR_A_ATMEL_DB081D: + case FLASH_5720VENDOR_ATMEL_45USPT: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvmpinstrp) { + case FLASH_5720VENDOR_M_ATMEL_DB021D: + case FLASH_5720VENDOR_A_ATMEL_DB021B: + case FLASH_5720VENDOR_A_ATMEL_DB021D: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + case FLASH_5720VENDOR_M_ATMEL_DB041D: + case FLASH_5720VENDOR_A_ATMEL_DB041B: + case FLASH_5720VENDOR_A_ATMEL_DB041D: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + case FLASH_5720VENDOR_M_ATMEL_DB081D: + case FLASH_5720VENDOR_A_ATMEL_DB081D: + tp->nvram_size = TG3_NVRAM_SIZE_1MB; + break; + default: + tp->nvram_size 
= TG3_NVRAM_SIZE_128KB; + break; + } + break; + case FLASH_5720VENDOR_M_ST_M25PE10: + case FLASH_5720VENDOR_M_ST_M45PE10: + case FLASH_5720VENDOR_A_ST_M25PE10: + case FLASH_5720VENDOR_A_ST_M45PE10: + case FLASH_5720VENDOR_M_ST_M25PE20: + case FLASH_5720VENDOR_M_ST_M45PE20: + case FLASH_5720VENDOR_A_ST_M25PE20: + case FLASH_5720VENDOR_A_ST_M45PE20: + case FLASH_5720VENDOR_M_ST_M25PE40: + case FLASH_5720VENDOR_M_ST_M45PE40: + case FLASH_5720VENDOR_A_ST_M25PE40: + case FLASH_5720VENDOR_A_ST_M45PE40: + case FLASH_5720VENDOR_M_ST_M25PE80: + case FLASH_5720VENDOR_M_ST_M45PE80: + case FLASH_5720VENDOR_A_ST_M25PE80: + case FLASH_5720VENDOR_A_ST_M45PE80: + case FLASH_5720VENDOR_ST_25USPT: + case FLASH_5720VENDOR_ST_45USPT: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + + switch (nvmpinstrp) { + case FLASH_5720VENDOR_M_ST_M25PE20: + case FLASH_5720VENDOR_M_ST_M45PE20: + case FLASH_5720VENDOR_A_ST_M25PE20: + case FLASH_5720VENDOR_A_ST_M45PE20: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + case FLASH_5720VENDOR_M_ST_M25PE40: + case FLASH_5720VENDOR_M_ST_M45PE40: + case FLASH_5720VENDOR_A_ST_M25PE40: + case FLASH_5720VENDOR_A_ST_M45PE40: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + case FLASH_5720VENDOR_M_ST_M25PE80: + case FLASH_5720VENDOR_M_ST_M45PE80: + case FLASH_5720VENDOR_A_ST_M25PE80: + case FLASH_5720VENDOR_A_ST_M45PE80: + tp->nvram_size = TG3_NVRAM_SIZE_1MB; + break; + default: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + } + break; + default: + tg3_flag_set(tp, NO_NVRAM); + return; + } + + tg3_nvram_get_pagesize(tp, nvcfg1); + if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); +} + +/* Chips other than 5700/5701 use the NVRAM for fetching info. */ +static void __devinit tg3_nvram_init(struct tg3 *tp) +{ + tw32_f(GRC_EEPROM_ADDR, + (EEPROM_ADDR_FSM_RESET | + (EEPROM_DEFAULT_CLOCK_PERIOD << + EEPROM_ADDR_CLKPERD_SHIFT))); + + msleep(1); + + /* Enable seeprom accesses. 
*/ + tw32_f(GRC_LOCAL_CTRL, + tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM); + udelay(100); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) { + tg3_flag_set(tp, NVRAM); + + if (tg3_nvram_lock(tp)) { + netdev_warn(tp->dev, + "Cannot get nvram lock, %s failed\n", + __func__); + return; + } + tg3_enable_nvram_access(tp); + + tp->nvram_size = 0; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) + tg3_get_5752_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) + tg3_get_5755_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) + tg3_get_5787_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + tg3_get_5761_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + tg3_get_5906_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + tg3_get_57780_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + tg3_get_5717_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + tg3_get_5720_nvram_info(tp); + else + tg3_get_nvram_info(tp); + + if (tp->nvram_size == 0) + tg3_get_nvram_size(tp); + + tg3_disable_nvram_access(tp); + tg3_nvram_unlock(tp); + + } else { + tg3_flag_clear(tp, NVRAM); + tg3_flag_clear(tp, NVRAM_BUFFERED); + + tg3_get_eeprom_size(tp); + } +} + +static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp, + u32 offset, u32 len, u8 *buf) +{ + int i, j, rc = 0; + u32 val; + + for (i = 0; i < len; i += 4) { + u32 addr; + __be32 data; + + addr = offset + i; + + memcpy(&data, buf + i, 4); + + /* + * The SEEPROM interface expects the data to always be opposite + * the native endian format. We accomplish this by reversing + * all the operations that would have been performed on the + * data from a call to tg3_nvram_read_be32(). 
+ */ + tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data))); + + val = tr32(GRC_EEPROM_ADDR); + tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE); + + val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK | + EEPROM_ADDR_READ); + tw32(GRC_EEPROM_ADDR, val | + (0 << EEPROM_ADDR_DEVID_SHIFT) | + (addr & EEPROM_ADDR_ADDR_MASK) | + EEPROM_ADDR_START | + EEPROM_ADDR_WRITE); + + for (j = 0; j < 1000; j++) { + val = tr32(GRC_EEPROM_ADDR); + + if (val & EEPROM_ADDR_COMPLETE) + break; + msleep(1); + } + if (!(val & EEPROM_ADDR_COMPLETE)) { + rc = -EBUSY; + break; + } + } + + return rc; +} + +/* offset and length are dword aligned */ +static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len, + u8 *buf) +{ + int ret = 0; + u32 pagesize = tp->nvram_pagesize; + u32 pagemask = pagesize - 1; + u32 nvram_cmd; + u8 *tmp; + + tmp = kmalloc(pagesize, GFP_KERNEL); + if (tmp == NULL) + return -ENOMEM; + + while (len) { + int j; + u32 phy_addr, page_off, size; + + phy_addr = offset & ~pagemask; + + for (j = 0; j < pagesize; j += 4) { + ret = tg3_nvram_read_be32(tp, phy_addr + j, + (__be32 *) (tmp + j)); + if (ret) + break; + } + if (ret) + break; + + page_off = offset & pagemask; + size = pagesize; + if (len < size) + size = len; + + len -= size; + + memcpy(tmp + page_off, buf, size); + + offset = offset + (pagesize - page_off); + + tg3_enable_nvram_access(tp); + + /* + * Before we can erase the flash page, we need + * to issue a special "write enable" command. + */ + nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; + + if (tg3_nvram_exec_cmd(tp, nvram_cmd)) + break; + + /* Erase the target page */ + tw32(NVRAM_ADDR, phy_addr); + + nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR | + NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE; + + if (tg3_nvram_exec_cmd(tp, nvram_cmd)) + break; + + /* Issue another write enable to start the write. 
*/ + nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; + + if (tg3_nvram_exec_cmd(tp, nvram_cmd)) + break; + + for (j = 0; j < pagesize; j += 4) { + __be32 data; + + data = *((__be32 *) (tmp + j)); + + tw32(NVRAM_WRDATA, be32_to_cpu(data)); + + tw32(NVRAM_ADDR, phy_addr + j); + + nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | + NVRAM_CMD_WR; + + if (j == 0) + nvram_cmd |= NVRAM_CMD_FIRST; + else if (j == (pagesize - 4)) + nvram_cmd |= NVRAM_CMD_LAST; + + if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd))) + break; + } + if (ret) + break; + } + + nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE; + tg3_nvram_exec_cmd(tp, nvram_cmd); + + kfree(tmp); + + return ret; +} + +/* offset and length are dword aligned */ +static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, + u8 *buf) +{ + int i, ret = 0; + + for (i = 0; i < len; i += 4, offset += 4) { + u32 page_off, phy_addr, nvram_cmd; + __be32 data; + + memcpy(&data, buf + i, 4); + tw32(NVRAM_WRDATA, be32_to_cpu(data)); + + page_off = offset % tp->nvram_pagesize; + + phy_addr = tg3_nvram_phys_addr(tp, offset); + + tw32(NVRAM_ADDR, phy_addr); + + nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR; + + if (page_off == 0 || i == 0) + nvram_cmd |= NVRAM_CMD_FIRST; + if (page_off == (tp->nvram_pagesize - 4)) + nvram_cmd |= NVRAM_CMD_LAST; + + if (i == (len - 4)) + nvram_cmd |= NVRAM_CMD_LAST; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 && + !tg3_flag(tp, 5755_PLUS) && + (tp->nvram_jedecnum == JEDEC_ST) && + (nvram_cmd & NVRAM_CMD_FIRST)) { + + if ((ret = tg3_nvram_exec_cmd(tp, + NVRAM_CMD_WREN | NVRAM_CMD_GO | + NVRAM_CMD_DONE))) + + break; + } + if (!tg3_flag(tp, FLASH)) { + /* We always do complete word writes to eeprom. */ + nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST); + } + + if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd))) + break; + } + return ret; +} + +/* offset and length are dword aligned */ +static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) +{ + int ret; + + if (tg3_flag(tp, EEPROM_WRITE_PROT)) { + tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl & + ~GRC_LCLCTRL_GPIO_OUTPUT1); + udelay(40); + } + + if (!tg3_flag(tp, NVRAM)) { + ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf); + } else { + u32 grc_mode; + + ret = tg3_nvram_lock(tp); + if (ret) + return ret; + + tg3_enable_nvram_access(tp); + if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) + tw32(NVRAM_WRITE1, 0x406); + + grc_mode = tr32(GRC_MODE); + tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE); + + if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) { + ret = tg3_nvram_write_block_buffered(tp, offset, len, + buf); + } else { + ret = tg3_nvram_write_block_unbuffered(tp, offset, len, + buf); + } + + grc_mode = tr32(GRC_MODE); + tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE); + + tg3_disable_nvram_access(tp); + tg3_nvram_unlock(tp); + } + + if (tg3_flag(tp, EEPROM_WRITE_PROT)) { + tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); + udelay(40); + } + + return ret; +} + +struct subsys_tbl_ent { + u16 subsys_vendor, subsys_devid; + u32 phy_id; +}; + +static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = { + /* Broadcom boards. 
*/ + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 }, + { TG3PCI_SUBVENDOR_ID_BROADCOM, + TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 }, + + /* 3com boards. */ + { TG3PCI_SUBVENDOR_ID_3COM, + TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 }, + { TG3PCI_SUBVENDOR_ID_3COM, + TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_3COM, + TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 }, + { TG3PCI_SUBVENDOR_ID_3COM, + TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_3COM, + TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 }, + + /* DELL boards. */ + { TG3PCI_SUBVENDOR_ID_DELL, + TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 }, + { TG3PCI_SUBVENDOR_ID_DELL, + TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 }, + { TG3PCI_SUBVENDOR_ID_DELL, + TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 }, + { TG3PCI_SUBVENDOR_ID_DELL, + TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 }, + + /* Compaq boards. */ + { TG3PCI_SUBVENDOR_ID_COMPAQ, + TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_COMPAQ, + TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_COMPAQ, + TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 }, + { TG3PCI_SUBVENDOR_ID_COMPAQ, + TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 }, + { TG3PCI_SUBVENDOR_ID_COMPAQ, + TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 }, + + /* IBM boards. */ + { TG3PCI_SUBVENDOR_ID_IBM, + TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 } +}; + +static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) { + if ((subsys_id_to_phy_id[i].subsys_vendor == + tp->pdev->subsystem_vendor) && + (subsys_id_to_phy_id[i].subsys_devid == + tp->pdev->subsystem_device)) + return &subsys_id_to_phy_id[i]; + } + return NULL; +} + +static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) +{ + u32 val; + + tp->phy_id = TG3_PHY_ID_INVALID; + tp->led_ctrl = LED_CTRL_MODE_PHY_1; + + /* Assume an onboard device and WOL capable by default. 
*/ + tg3_flag_set(tp, EEPROM_WRITE_PROT); + tg3_flag_set(tp, WOL_CAP); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) { + tg3_flag_clear(tp, EEPROM_WRITE_PROT); + tg3_flag_set(tp, IS_NIC); + } + val = tr32(VCPU_CFGSHDW); + if (val & VCPU_CFGSHDW_ASPM_DBNC) + tg3_flag_set(tp, ASPM_WORKAROUND); + if ((val & VCPU_CFGSHDW_WOL_ENABLE) && + (val & VCPU_CFGSHDW_WOL_MAGPKT)) { + tg3_flag_set(tp, WOL_ENABLE); + device_set_wakeup_enable(&tp->pdev->dev, true); + } + goto done; + } + + tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); + if (val == NIC_SRAM_DATA_SIG_MAGIC) { + u32 nic_cfg, led_cfg; + u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id; + int eeprom_phy_serdes = 0; + + tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); + tp->nic_sram_data_cfg = nic_cfg; + + tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver); + ver >>= NIC_SRAM_DATA_VER_SHIFT; + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 && + (ver > 0) && (ver < 0x100)) + tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) + tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4); + + if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) == + NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) + eeprom_phy_serdes = 1; + + tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id); + if (nic_phy_id != 0) { + u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK; + u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK; + + eeprom_phy_id = (id1 >> 16) << 10; + eeprom_phy_id |= (id2 & 0xfc00) << 16; + eeprom_phy_id |= (id2 & 0x03ff) << 0; + } else + eeprom_phy_id = 0; + + tp->phy_id = eeprom_phy_id; + if (eeprom_phy_serdes) { + if (!tg3_flag(tp, 5705_PLUS)) + tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; + else + tp->phy_flags |= TG3_PHYFLG_MII_SERDES; + } + + if (tg3_flag(tp, 5750_PLUS)) + led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK | + SHASTA_EXT_LED_MODE_MASK); + else + led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK; + + switch (led_cfg) { + default: + case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1: + tp->led_ctrl = LED_CTRL_MODE_PHY_1; + break; + + case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2: + tp->led_ctrl = LED_CTRL_MODE_PHY_2; + break; + + case NIC_SRAM_DATA_CFG_LED_MODE_MAC: + tp->led_ctrl = LED_CTRL_MODE_MAC; + + /* Default to PHY_1_MODE if 0 (MAC_MODE) is + * read on some older 5700/5701 bootcode. 
+ */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == + ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == + ASIC_REV_5701) + tp->led_ctrl = LED_CTRL_MODE_PHY_1; + + break; + + case SHASTA_EXT_LED_SHARED: + tp->led_ctrl = LED_CTRL_MODE_SHARED; + if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && + tp->pci_chip_rev_id != CHIPREV_ID_5750_A1) + tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | + LED_CTRL_MODE_PHY_2); + break; + + case SHASTA_EXT_LED_MAC: + tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC; + break; + + case SHASTA_EXT_LED_COMBO: + tp->led_ctrl = LED_CTRL_MODE_COMBO; + if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) + tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 | + LED_CTRL_MODE_PHY_2); + break; + + } + + if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) && + tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL) + tp->led_ctrl = LED_CTRL_MODE_PHY_2; + + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) + tp->led_ctrl = LED_CTRL_MODE_PHY_1; + + if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) { + tg3_flag_set(tp, EEPROM_WRITE_PROT); + if ((tp->pdev->subsystem_vendor == + PCI_VENDOR_ID_ARIMA) && + (tp->pdev->subsystem_device == 0x205a || + tp->pdev->subsystem_device == 0x2063)) + tg3_flag_clear(tp, EEPROM_WRITE_PROT); + } else { + tg3_flag_clear(tp, EEPROM_WRITE_PROT); + tg3_flag_set(tp, IS_NIC); + } + + if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) { + tg3_flag_set(tp, ENABLE_ASF); + if (tg3_flag(tp, 5750_PLUS)) + tg3_flag_set(tp, ASF_NEW_HANDSHAKE); + } + + if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) && + tg3_flag(tp, 5750_PLUS)) + tg3_flag_set(tp, ENABLE_APE); + + if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES && + !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)) + tg3_flag_clear(tp, WOL_CAP); + + if (tg3_flag(tp, WOL_CAP) && + (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) { + tg3_flag_set(tp, WOL_ENABLE); + device_set_wakeup_enable(&tp->pdev->dev, true); + } + + if (cfg2 & (1 << 17)) + tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING; + + /* serdes signal pre-emphasis in register 0x590 set by */ + /* bootcode if bit 18 is set */ + if (cfg2 & (1 << 18)) + tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS; + + if ((tg3_flag(tp, 57765_PLUS) || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && + GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) && + (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN)) + tp->phy_flags |= TG3_PHYFLG_ENABLE_APD; + + if (tg3_flag(tp, PCI_EXPRESS) && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && + !tg3_flag(tp, 57765_PLUS)) { + u32 cfg3; + + tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3); + if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE) + tg3_flag_set(tp, ASPM_WORKAROUND); + } + + if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE) + tg3_flag_set(tp, RGMII_INBAND_DISABLE); + if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN) + tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN); + if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN) + tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN); + } +done: + if (tg3_flag(tp, WOL_CAP)) + device_set_wakeup_enable(&tp->pdev->dev, + tg3_flag(tp, WOL_ENABLE)); + else + device_set_wakeup_capable(&tp->pdev->dev, false); +} + +static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd) +{ + int i; + u32 val; + + tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START); + tw32(OTP_CTRL, cmd); + + /* Wait for up to 1 ms for command to execute. */ + for (i = 0; i < 100; i++) { + val = tr32(OTP_STATUS); + if (val & OTP_STATUS_CMD_DONE) + break; + udelay(10); + } + + return (val & OTP_STATUS_CMD_DONE) ? 
0 : -EBUSY; +} + +/* Read the gphy configuration from the OTP region of the chip. The gphy + * configuration is a 32-bit value that straddles the alignment boundary. + * We do two 32-bit reads and then shift and merge the results. + */ +static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp) +{ + u32 bhalf_otp, thalf_otp; + + tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC); + + if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT)) + return 0; + + tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1); + + if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ)) + return 0; + + thalf_otp = tr32(OTP_READ_DATA); + + tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2); + + if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ)) + return 0; + + bhalf_otp = tr32(OTP_READ_DATA); + + return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16); +} + +static void __devinit tg3_phy_init_link_config(struct tg3 *tp) +{ + u32 adv = ADVERTISED_Autoneg | + ADVERTISED_Pause; + + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) + adv |= ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full; + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) + adv |= ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_TP; + else + adv |= ADVERTISED_FIBRE; + + tp->link_config.advertising = adv; + tp->link_config.speed = SPEED_INVALID; + tp->link_config.duplex = DUPLEX_INVALID; + tp->link_config.autoneg = AUTONEG_ENABLE; + tp->link_config.active_speed = SPEED_INVALID; + tp->link_config.active_duplex = DUPLEX_INVALID; + tp->link_config.orig_speed = SPEED_INVALID; + tp->link_config.orig_duplex = DUPLEX_INVALID; + tp->link_config.orig_autoneg = AUTONEG_INVALID; +} + +static int __devinit tg3_phy_probe(struct tg3 *tp) +{ + u32 hw_phy_id_1, hw_phy_id_2; + u32 hw_phy_id, hw_phy_id_masked; + int err; + + /* flow control autonegotiation is default behavior */ + tg3_flag_set(tp, PAUSE_AUTONEG); + tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; + + if (tg3_flag(tp, USE_PHYLIB)) + return tg3_phy_init(tp); + + /* Reading the PHY ID register can conflict with ASF + * firmware access to the PHY hardware. + */ + err = 0; + if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) { + hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID; + } else { + /* Now read the physical PHY_ID from the chip and verify + * that it is sane. If it doesn't look good, we fall back + * to either the hard-coded table based PHY_ID and failing + * that the value found in the eeprom area. + */ + err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1); + err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2); + + hw_phy_id = (hw_phy_id_1 & 0xffff) << 10; + hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16; + hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0; + + hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK; + } + + if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) { + tp->phy_id = hw_phy_id; + if (hw_phy_id_masked == TG3_PHY_ID_BCM8002) + tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; + else + tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES; + } else { + if (tp->phy_id != TG3_PHY_ID_INVALID) { + /* Do nothing, phy ID already set up in + * tg3_get_eeprom_hw_cfg(). + */ + } else { + struct subsys_tbl_ent *p; + + /* No eeprom signature? Try the hardcoded + * subsys device table. 
+ */ + p = tg3_lookup_by_subsys(tp); + if (!p) + return -ENODEV; + + tp->phy_id = p->phy_id; + if (!tp->phy_id || + tp->phy_id == TG3_PHY_ID_BCM8002) + tp->phy_flags |= TG3_PHYFLG_PHY_SERDES; + } + } + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 || + (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 && + tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && + tp->pci_chip_rev_id != CHIPREV_ID_57765_A0))) + tp->phy_flags |= TG3_PHYFLG_EEE_CAP; + + tg3_phy_init_link_config(tp); + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && + !tg3_flag(tp, ENABLE_APE) && + !tg3_flag(tp, ENABLE_ASF)) { + u32 bmsr, mask; + + tg3_readphy(tp, MII_BMSR, &bmsr); + if (!tg3_readphy(tp, MII_BMSR, &bmsr) && + (bmsr & BMSR_LSTATUS)) + goto skip_phy_reset; + + err = tg3_phy_reset(tp); + if (err) + return err; + + tg3_phy_set_wirespeed(tp); + + mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full); + if (!tg3_copper_is_advertising_all(tp, mask)) { + tg3_phy_autoneg_cfg(tp, tp->link_config.advertising, + tp->link_config.flowctrl); + + tg3_writephy(tp, MII_BMCR, + BMCR_ANENABLE | BMCR_ANRESTART); + } + } + +skip_phy_reset: + if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { + err = tg3_init_5401phy_dsp(tp); + if (err) + return err; + + err = tg3_init_5401phy_dsp(tp); + } + + return err; +} + +static void __devinit tg3_read_vpd(struct tg3 *tp) +{ + u8 *vpd_data; + unsigned int block_end, rosize, len; + u32 vpdlen; + int j, i = 0; + + vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen); + if (!vpd_data) + goto out_no_vpd; + + i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA); + if (i < 0) + goto out_not_found; + + rosize = pci_vpd_lrdt_size(&vpd_data[i]); + block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize; + i += PCI_VPD_LRDT_TAG_SIZE; + + if (block_end > vpdlen) + goto out_not_found; + + j = pci_vpd_find_info_keyword(vpd_data, i, rosize, + PCI_VPD_RO_KEYWORD_MFR_ID); + if (j > 0) { + len = pci_vpd_info_field_size(&vpd_data[j]); + + j += PCI_VPD_INFO_FLD_HDR_SIZE; + if (j + len > block_end || len != 4 || + memcmp(&vpd_data[j], "1028", 4)) + goto partno; + + j = pci_vpd_find_info_keyword(vpd_data, i, rosize, + PCI_VPD_RO_KEYWORD_VENDOR0); + if (j < 0) + goto partno; + + len = pci_vpd_info_field_size(&vpd_data[j]); + + j += PCI_VPD_INFO_FLD_HDR_SIZE; + if (j + len > block_end) + goto partno; + + memcpy(tp->fw_ver, &vpd_data[j], len); + strncat(tp->fw_ver, " bc ", vpdlen - len - 1); + } + +partno: + i = pci_vpd_find_info_keyword(vpd_data, i, rosize, + PCI_VPD_RO_KEYWORD_PARTNO); + if (i < 0) + goto out_not_found; + + len = pci_vpd_info_field_size(&vpd_data[i]); + + i += PCI_VPD_INFO_FLD_HDR_SIZE; + if (len > TG3_BPN_SIZE || + (len + i) > vpdlen) + goto out_not_found; + + memcpy(tp->board_part_number, &vpd_data[i], len); + +out_not_found: + kfree(vpd_data); + if (tp->board_part_number[0]) + return; + +out_no_vpd: + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { + if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717) + strcpy(tp->board_part_number, "BCM5717"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718) + strcpy(tp->board_part_number, "BCM5718"); + else + goto nomatch; + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { + if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780) + 
strcpy(tp->board_part_number, "BCM57780"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760) + strcpy(tp->board_part_number, "BCM57760"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790) + strcpy(tp->board_part_number, "BCM57790"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788) + strcpy(tp->board_part_number, "BCM57788"); + else + goto nomatch; + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { + if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761) + strcpy(tp->board_part_number, "BCM57761"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765) + strcpy(tp->board_part_number, "BCM57765"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781) + strcpy(tp->board_part_number, "BCM57781"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785) + strcpy(tp->board_part_number, "BCM57785"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791) + strcpy(tp->board_part_number, "BCM57791"); + else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) + strcpy(tp->board_part_number, "BCM57795"); + else + goto nomatch; + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + strcpy(tp->board_part_number, "BCM95906"); + } else { +nomatch: + strcpy(tp->board_part_number, "none"); + } +} + +static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset) +{ + u32 val; + + if (tg3_nvram_read(tp, offset, &val) || + (val & 0xfc000000) != 0x0c000000 || + tg3_nvram_read(tp, offset + 4, &val) || + val != 0) + return 0; + + return 1; +} + +static void __devinit tg3_read_bc_ver(struct tg3 *tp) +{ + u32 val, offset, start, ver_offset; + int i, dst_off; + bool newver = false; + + if (tg3_nvram_read(tp, 0xc, &offset) || + tg3_nvram_read(tp, 0x4, &start)) + return; + + offset = tg3_nvram_logical_addr(tp, offset); + + if (tg3_nvram_read(tp, offset, &val)) + return; + + if ((val & 0xfc000000) == 0x0c000000) { + if (tg3_nvram_read(tp, offset + 4, &val)) + return; + + if (val == 0) + newver = true; + } + + dst_off = strlen(tp->fw_ver); + + if (newver) { + if (TG3_VER_SIZE - dst_off < 16 || + tg3_nvram_read(tp, offset + 8, &ver_offset)) + return; + + offset = offset + ver_offset - start; + for (i = 0; i < 16; i += 4) { + __be32 v; + if (tg3_nvram_read_be32(tp, offset + i, &v)) + return; + + memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v)); + } + } else { + u32 major, minor; + + if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset)) + return; + + major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >> + TG3_NVM_BCVER_MAJSFT; + minor = ver_offset & TG3_NVM_BCVER_MINMSK; + snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off, + "v%d.%02d", major, minor); + } +} + +static void __devinit tg3_read_hwsb_ver(struct tg3 *tp) +{ + u32 val, major, minor; + + /* Use native endian representation */ + if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val)) + return; + + major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >> + TG3_NVM_HWSB_CFG1_MAJSFT; + minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >> + TG3_NVM_HWSB_CFG1_MINSFT; + + snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor); +} + +static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val) +{ + u32 offset, major, minor, build; + + strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1); + + if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1) + return; + + switch (val & TG3_EEPROM_SB_REVISION_MASK) { + case TG3_EEPROM_SB_REVISION_0: + offset = TG3_EEPROM_SB_F1R0_EDH_OFF; + break; + case TG3_EEPROM_SB_REVISION_2: + offset = TG3_EEPROM_SB_F1R2_EDH_OFF; + break; + case TG3_EEPROM_SB_REVISION_3: + 
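
tg3_read_bc_ver() and tg3_read_hwsb_ver() above share one pattern: read a 32-bit word from NVRAM, extract the major/minor fields by mask and shift, and snprintf() the result into the bounded fw_ver buffer. A self-contained sketch under a hypothetical field layout (major in bits 11:8, minor in bits 7:0 — not the TG3_NVM_* layout):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field layout, standing in for the TG3_NVM_*_MAJMSK /
 * _MAJSFT / _MINMSK constants used by the driver.
 */
#define VER_MAJ_MASK  0x00000f00u
#define VER_MAJ_SHIFT 8
#define VER_MIN_MASK  0x000000ffu

static void format_ver(char *buf, size_t len, uint32_t word)
{
        unsigned major = (unsigned)((word & VER_MAJ_MASK) >> VER_MAJ_SHIFT);
        unsigned minor = (unsigned)(word & VER_MIN_MASK);

        /* snprintf() bounds the write, just as the driver passes
         * TG3_VER_SIZE minus the current string offset as the
         * remaining-space argument.
         */
        snprintf(buf, len, "v%u.%02u", major, minor);
}
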
offset = TG3_EEPROM_SB_F1R3_EDH_OFF; + break; + case TG3_EEPROM_SB_REVISION_4: + offset = TG3_EEPROM_SB_F1R4_EDH_OFF; + break; + case TG3_EEPROM_SB_REVISION_5: + offset = TG3_EEPROM_SB_F1R5_EDH_OFF; + break; + case TG3_EEPROM_SB_REVISION_6: + offset = TG3_EEPROM_SB_F1R6_EDH_OFF; + break; + default: + return; + } + + if (tg3_nvram_read(tp, offset, &val)) + return; + + build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >> + TG3_EEPROM_SB_EDH_BLD_SHFT; + major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >> + TG3_EEPROM_SB_EDH_MAJ_SHFT; + minor = val & TG3_EEPROM_SB_EDH_MIN_MASK; + + if (minor > 99 || build > 26) + return; + + offset = strlen(tp->fw_ver); + snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset, + " v%d.%02d", major, minor); + + if (build > 0) { + offset = strlen(tp->fw_ver); + if (offset < TG3_VER_SIZE - 1) + tp->fw_ver[offset] = 'a' + build - 1; + } +} + +static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp) +{ + u32 val, offset, start; + int i, vlen; + + for (offset = TG3_NVM_DIR_START; + offset < TG3_NVM_DIR_END; + offset += TG3_NVM_DIRENT_SIZE) { + if (tg3_nvram_read(tp, offset, &val)) + return; + + if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI) + break; + } + + if (offset == TG3_NVM_DIR_END) + return; + + if (!tg3_flag(tp, 5705_PLUS)) + start = 0x08000000; + else if (tg3_nvram_read(tp, offset - 4, &start)) + return; + + if (tg3_nvram_read(tp, offset + 4, &offset) || + !tg3_fw_img_is_valid(tp, offset) || + tg3_nvram_read(tp, offset + 8, &val)) + return; + + offset += val - start; + + vlen = strlen(tp->fw_ver); + + tp->fw_ver[vlen++] = ','; + tp->fw_ver[vlen++] = ' '; + + for (i = 0; i < 4; i++) { + __be32 v; + if (tg3_nvram_read_be32(tp, offset, &v)) + return; + + offset += sizeof(v); + + if (vlen > TG3_VER_SIZE - sizeof(v)) { + memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen); + break; + } + + memcpy(&tp->fw_ver[vlen], &v, sizeof(v)); + vlen += sizeof(v); + } +} + +static void __devinit tg3_read_dash_ver(struct tg3 *tp) +{ + int vlen; + u32 apedata; + char *fwtype; + + if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF)) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); + if (apedata != APE_SEG_SIG_MAGIC) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); + if (!(apedata & APE_FW_STATUS_READY)) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION); + + if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) { + tg3_flag_set(tp, APE_HAS_NCSI); + fwtype = "NCSI"; + } else { + fwtype = "DASH"; + } + + vlen = strlen(tp->fw_ver); + + snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d", + fwtype, + (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT, + (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT, + (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT, + (apedata & APE_FW_VERSION_BLDMSK)); +} + +static void __devinit tg3_read_fw_ver(struct tg3 *tp) +{ + u32 val; + bool vpd_vers = false; + + if (tp->fw_ver[0] != 0) + vpd_vers = true; + + if (tg3_flag(tp, NO_NVRAM)) { + strcat(tp->fw_ver, "sb"); + return; + } + + if (tg3_nvram_read(tp, 0, &val)) + return; + + if (val == TG3_EEPROM_MAGIC) + tg3_read_bc_ver(tp); + else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) + tg3_read_sb_ver(tp, val); + else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW) + tg3_read_hwsb_ver(tp); + else + return; + + if (vpd_vers) + goto done; + + if (tg3_flag(tp, ENABLE_APE)) { + if (tg3_flag(tp, ENABLE_ASF)) + tg3_read_dash_ver(tp); + } else if (tg3_flag(tp, ENABLE_ASF)) { + 
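
tg3_read_fw_ver() above chooses a parser by classifying NVRAM word 0: an exact match against the bootcode magic, then masked matches for the two selfboot formats, otherwise no recognizable image. A sketch of that classification; the magic values and masks below are stand-ins, not the driver's TG3_EEPROM_MAGIC* constants:

#include <stdint.h>

enum fw_img_kind { FW_BOOTCODE, FW_SELFBOOT, FW_SELFBOOT_HW, FW_UNKNOWN };

/* Hypothetical magics standing in for TG3_EEPROM_MAGIC and the
 * TG3_EEPROM_MAGIC_{FW,HW}_MSK / value pairs.
 */
#define MAGIC_BOOTCODE 0x669955aau
#define MAGIC_FW_MASK  0xff000000u
#define MAGIC_FW_VAL   0xa5000000u
#define MAGIC_HW_MASK  0xffff0000u
#define MAGIC_HW_VAL   0xabcd0000u

static enum fw_img_kind classify_nvram_image(uint32_t word0)
{
        if (word0 == MAGIC_BOOTCODE)
                return FW_BOOTCODE;          /* full bootcode image */
        if ((word0 & MAGIC_FW_MASK) == MAGIC_FW_VAL)
                return FW_SELFBOOT;          /* selfboot format */
        if ((word0 & MAGIC_HW_MASK) == MAGIC_HW_VAL)
                return FW_SELFBOOT_HW;       /* hardware selfboot format */
        return FW_UNKNOWN;                   /* leave fw_ver untouched */
}

Ordering matters: the exact match is tested before the masked matches so a bootcode image is never misclassified by a looser mask.
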
tg3_read_mgmtfw_ver(tp);
+ }
+
+done:
+ tp->fw_ver[TG3_VER_SIZE - 1] = 0;
+}
+
+static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
+
+static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
+{
+ if (tg3_flag(tp, LRG_PROD_RING_CAP))
+ return TG3_RX_RET_MAX_SIZE_5717;
+ else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
+ return TG3_RX_RET_MAX_SIZE_5700;
+ else
+ return TG3_RX_RET_MAX_SIZE_5705;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
+ { },
+};
+
+static int __devinit tg3_get_invariants(struct tg3 *tp)
+{
+ u32 misc_ctrl_reg;
+ u32 pci_state_reg, grc_misc_cfg;
+ u32 val;
+ u16 pci_cmd;
+ int err;
+
+ /* Force memory write invalidate off. If we leave it on,
+ * then on 5700_BX chips we have to enable a workaround.
+ * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
+ * to match the cacheline size. The Broadcom driver has this
+ * workaround but always turns MWI off, so the workaround is
+ * never used. This seems to suggest that the workaround is insufficient.
+ */
+ pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
+ pci_cmd &= ~PCI_COMMAND_INVALIDATE;
+ pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
+
+ /* Important! -- Make sure register accesses are byteswapped
+ * correctly. Also, for those chips that require it, make
+ * sure that indirect register accesses are enabled before
+ * the first operation.
+ */
+ pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+ &misc_ctrl_reg);
+ tp->misc_host_ctrl |= (misc_ctrl_reg &
+ MISC_HOST_CTRL_CHIPREV);
+ pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+ tp->misc_host_ctrl);
+
+ tp->pci_chip_rev_id = (misc_ctrl_reg >>
+ MISC_HOST_CTRL_CHIPREV_SHIFT);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
+ u32 prod_id_asic_rev;
+
+ if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
+ pci_read_config_dword(tp->pdev,
+ TG3PCI_GEN2_PRODID_ASICREV,
+ &prod_id_asic_rev);
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
+ pci_read_config_dword(tp->pdev,
+ TG3PCI_GEN15_PRODID_ASICREV,
+ &prod_id_asic_rev);
+ else
+ pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
+ &prod_id_asic_rev);
+
+ tp->pci_chip_rev_id = prod_id_asic_rev;
+ }
+
+ /* Wrong chip ID in 5752 A0. This code can be removed later
+ * as A0 is not in production.
+ */
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
+ tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
+
+ /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
+ * we need to disable memory and use config. cycles
+ * only to access all registers. The 5702/03 chips
+ * can mistakenly decode the special cycles from the
+ * ICH chipsets as memory write cycles, causing corruption
+ * of register and memory space. Only certain ICH bridges
+ * will drive special cycles with non-zero data during the
+ * address phase which can fall within the 5703's address
+ * range.
This is not an ICH bug as the PCI spec allows + * non-zero address during special cycles. However, only + * these ICH bridges are known to drive non-zero addresses + * during special cycles. + * + * Since special cycles do not cross PCI bridges, we only + * enable this workaround if the 5703 is on the secondary + * bus of these ICH bridges. + */ + if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) || + (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) { + static struct tg3_dev_id { + u32 vendor; + u32 device; + u32 rev; + } ich_chipsets[] = { + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8, + PCI_ANY_ID }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8, + PCI_ANY_ID }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11, + 0xa }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6, + PCI_ANY_ID }, + { }, + }; + struct tg3_dev_id *pci_id = &ich_chipsets[0]; + struct pci_dev *bridge = NULL; + + while (pci_id->vendor != 0) { + bridge = pci_get_device(pci_id->vendor, pci_id->device, + bridge); + if (!bridge) { + pci_id++; + continue; + } + if (pci_id->rev != PCI_ANY_ID) { + if (bridge->revision > pci_id->rev) + continue; + } + if (bridge->subordinate && + (bridge->subordinate->number == + tp->pdev->bus->number)) { + tg3_flag_set(tp, ICH_WORKAROUND); + pci_dev_put(bridge); + break; + } + } + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + static struct tg3_dev_id { + u32 vendor; + u32 device; + } bridge_chipsets[] = { + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 }, + { }, + }; + struct tg3_dev_id *pci_id = &bridge_chipsets[0]; + struct pci_dev *bridge = NULL; + + while (pci_id->vendor != 0) { + bridge = pci_get_device(pci_id->vendor, + pci_id->device, + bridge); + if (!bridge) { + pci_id++; + continue; + } + if (bridge->subordinate && + (bridge->subordinate->number <= + tp->pdev->bus->number) && + (bridge->subordinate->subordinate >= + tp->pdev->bus->number)) { + tg3_flag_set(tp, 5701_DMA_BUG); + pci_dev_put(bridge); + break; + } + } + } + + /* The EPB bridge inside 5714, 5715, and 5780 cannot support + * DMA addresses > 40-bit. This bridge may have other additional + * 57xx devices behind it in some 4-port NIC designs for example. + * Any tg3 device found behind the bridge will also need the 40-bit + * DMA workaround. 
+ */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { + tg3_flag_set(tp, 5780_CLASS); + tg3_flag_set(tp, 40BIT_DMA_BUG); + tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI); + } else { + struct pci_dev *bridge = NULL; + + do { + bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS, + PCI_DEVICE_ID_SERVERWORKS_EPB, + bridge); + if (bridge && bridge->subordinate && + (bridge->subordinate->number <= + tp->pdev->bus->number) && + (bridge->subordinate->subordinate >= + tp->pdev->bus->number)) { + tg3_flag_set(tp, 40BIT_DMA_BUG); + pci_dev_put(bridge); + break; + } + } while (bridge); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) + tp->pdev_peer = tg3_find_peer(tp); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + tg3_flag_set(tp, 5717_PLUS); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 || + tg3_flag(tp, 5717_PLUS)) + tg3_flag_set(tp, 57765_PLUS); + + /* Intentionally exclude ASIC_REV_5906 */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || + tg3_flag(tp, 57765_PLUS)) + tg3_flag_set(tp, 5755_PLUS); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 || + tg3_flag(tp, 5755_PLUS) || + tg3_flag(tp, 5780_CLASS)) + tg3_flag_set(tp, 5750_PLUS); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || + tg3_flag(tp, 5750_PLUS)) + tg3_flag_set(tp, 5705_PLUS); + + /* Determine TSO capabilities */ + if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) + ; /* Do nothing. HW bug. 
*/ + else if (tg3_flag(tp, 57765_PLUS)) + tg3_flag_set(tp, HW_TSO_3); + else if (tg3_flag(tp, 5755_PLUS) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + tg3_flag_set(tp, HW_TSO_2); + else if (tg3_flag(tp, 5750_PLUS)) { + tg3_flag_set(tp, HW_TSO_1); + tg3_flag_set(tp, TSO_BUG); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 && + tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2) + tg3_flag_clear(tp, TSO_BUG); + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && + tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { + tg3_flag_set(tp, TSO_BUG); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) + tp->fw_needed = FIRMWARE_TG3TSO5; + else + tp->fw_needed = FIRMWARE_TG3TSO; + } + + /* Selectively allow TSO based on operating conditions */ + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3) || + (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF))) + tg3_flag_set(tp, TSO_CAPABLE); + else { + tg3_flag_clear(tp, TSO_CAPABLE); + tg3_flag_clear(tp, TSO_BUG); + tp->fw_needed = NULL; + } + + if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) + tp->fw_needed = FIRMWARE_TG3; + + tp->irq_max = 1; + + if (tg3_flag(tp, 5750_PLUS)) { + tg3_flag_set(tp, SUPPORT_MSI); + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX || + GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 && + tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 && + tp->pdev_peer == tp->pdev)) + tg3_flag_clear(tp, SUPPORT_MSI); + + if (tg3_flag(tp, 5755_PLUS) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + tg3_flag_set(tp, 1SHOT_MSI); + } + + if (tg3_flag(tp, 57765_PLUS)) { + tg3_flag_set(tp, SUPPORT_MSIX); + tp->irq_max = TG3_IRQ_MAX_VECS; + } + } + + if (tg3_flag(tp, 5755_PLUS)) + tg3_flag_set(tp, SHORT_DMA_BUG); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + tg3_flag_set(tp, 4K_FIFO_LIMIT); + + if (tg3_flag(tp, 5717_PLUS)) + tg3_flag_set(tp, LRG_PROD_RING_CAP); + + if (tg3_flag(tp, 57765_PLUS) && + tp->pci_chip_rev_id != CHIPREV_ID_5719_A0) + tg3_flag_set(tp, USE_JUMBO_BDFLAG); + + if (!tg3_flag(tp, 5705_PLUS) || + tg3_flag(tp, 5780_CLASS) || + tg3_flag(tp, USE_JUMBO_BDFLAG)) + tg3_flag_set(tp, JUMBO_CAPABLE); + + pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, + &pci_state_reg); + + if (pci_is_pcie(tp->pdev)) { + u16 lnkctl; + + tg3_flag_set(tp, PCI_EXPRESS); + + tp->pcie_readrq = 4096; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + tp->pcie_readrq = 2048; + + pcie_set_readrq(tp->pdev, tp->pcie_readrq); + + pci_read_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, + &lnkctl); + if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == + ASIC_REV_5906) { + tg3_flag_clear(tp, HW_TSO_2); + tg3_flag_clear(tp, TSO_CAPABLE); + } + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || + tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_57780_A1) + tg3_flag_set(tp, CLKREQ_BUG); + } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) { + tg3_flag_set(tp, L1PLLPD_EN); + } + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { + /* BCM5785 devices are effectively PCIe devices, and should + * follow PCIe codepaths, but do not have a PCIe capabilities + * section. 
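+ * (Setting the PCI_EXPRESS flag below keeps them on those codepaths.)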
+ */
+ tg3_flag_set(tp, PCI_EXPRESS);
+ } else if (!tg3_flag(tp, 5705_PLUS) ||
+ tg3_flag(tp, 5780_CLASS)) {
+ tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
+ if (!tp->pcix_cap) {
+ dev_err(&tp->pdev->dev,
+ "Cannot find PCI-X capability, aborting\n");
+ return -EIO;
+ }
+
+ if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
+ tg3_flag_set(tp, PCIX_MODE);
+ }
+
+ /* If we have an AMD 762 or VIA K8T800 chipset, write
+ * reordering to the mailbox registers done by the host
+ * controller can cause major troubles. We read back from
+ * every mailbox register write to force the writes to be
+ * posted to the chip in order.
+ */
+ if (pci_dev_present(tg3_write_reorder_chipsets) &&
+ !tg3_flag(tp, PCI_EXPRESS))
+ tg3_flag_set(tp, MBOX_WRITE_REORDER);
+
+ pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
+ &tp->pci_cacheline_sz);
+ pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
+ &tp->pci_lat_timer);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
+ tp->pci_lat_timer < 64) {
+ tp->pci_lat_timer = 64;
+ pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
+ tp->pci_lat_timer);
+ }
+
+ /* Important! -- It is critical that the PCI-X hw workaround
+ * situation is decided before the first MMIO register access.
+ */
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
+ /* 5700 BX chips need to have their TX producer index
+ * mailboxes written twice to work around a bug.
+ */
+ tg3_flag_set(tp, TXD_MBOX_HWBUG);
+
+ /* If we are in PCI-X mode, enable register write workaround.
+ *
+ * The workaround is to use indirect register accesses
+ * for all chip writes not to mailbox registers.
+ */
+ if (tg3_flag(tp, PCIX_MODE)) {
+ u32 pm_reg;
+
+ tg3_flag_set(tp, PCIX_TARGET_HWBUG);
+
+ /* The chip can have its power management PCI config
+ * space registers clobbered due to this bug.
+ * So explicitly force the chip into D0 here.
+ */
+ pci_read_config_dword(tp->pdev,
+ tp->pm_cap + PCI_PM_CTRL,
+ &pm_reg);
+ pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
+ pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
+ pci_write_config_dword(tp->pdev,
+ tp->pm_cap + PCI_PM_CTRL,
+ pm_reg);
+
+ /* Also, force SERR#/PERR# in PCI command. */
+ pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
+ pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
+ pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
+ }
+ }
+
+ if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
+ tg3_flag_set(tp, PCI_HIGH_SPEED);
+ if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
+ tg3_flag_set(tp, PCI_32BIT);
+
+ /* Chip-specific fixup from Broadcom driver */
+ if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
+ (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
+ pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
+ pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
+ }
+
+ /* Default fast path register access methods */
+ tp->read32 = tg3_read32;
+ tp->write32 = tg3_write32;
+ tp->read32_mbox = tg3_read32;
+ tp->write32_mbox = tg3_write32;
+ tp->write32_tx_mbox = tg3_write32;
+ tp->write32_rx_mbox = tg3_write32;
+
+ /* Various workaround register access methods */
+ if (tg3_flag(tp, PCIX_TARGET_HWBUG))
+ tp->write32 = tg3_write_indirect_reg32;
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
+ (tg3_flag(tp, PCI_EXPRESS) &&
+ tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
+ /*
+ * Back-to-back register writes can cause problems on these
+ * chips; the workaround is to read back all reg writes
+ * except those to mailbox regs.
+ *
+ * See tg3_write_indirect_reg32().
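+ * (tg3_write_flush_reg32(), installed just below, does that read-back.)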
+ */ + tp->write32 = tg3_write_flush_reg32; + } + + if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) { + tp->write32_tx_mbox = tg3_write32_tx_mbox; + if (tg3_flag(tp, MBOX_WRITE_REORDER)) + tp->write32_rx_mbox = tg3_write_flush_reg32; + } + + if (tg3_flag(tp, ICH_WORKAROUND)) { + tp->read32 = tg3_read_indirect_reg32; + tp->write32 = tg3_write_indirect_reg32; + tp->read32_mbox = tg3_read_indirect_mbox; + tp->write32_mbox = tg3_write_indirect_mbox; + tp->write32_tx_mbox = tg3_write_indirect_mbox; + tp->write32_rx_mbox = tg3_write_indirect_mbox; + + iounmap(tp->regs); + tp->regs = NULL; + + pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); + pci_cmd &= ~PCI_COMMAND_MEMORY; + pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); + } + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + tp->read32_mbox = tg3_read32_mbox_5906; + tp->write32_mbox = tg3_write32_mbox_5906; + tp->write32_tx_mbox = tg3_write32_mbox_5906; + tp->write32_rx_mbox = tg3_write32_mbox_5906; + } + + if (tp->write32 == tg3_write_indirect_reg32 || + (tg3_flag(tp, PCIX_MODE) && + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701))) + tg3_flag_set(tp, SRAM_USE_CONFIG); + + /* The memory arbiter has to be enabled in order for SRAM accesses + * to succeed. Normally on powerup the tg3 chip firmware will make + * sure it is enabled, but other entities such as system netboot + * code might disable it. + */ + val = tr32(MEMARB_MODE); + tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); + + if (tg3_flag(tp, PCIX_MODE)) { + pci_read_config_dword(tp->pdev, + tp->pcix_cap + PCI_X_STATUS, &val); + tp->pci_fn = val & 0x7; + } else { + tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3; + } + + /* Get eeprom hw config before calling tg3_set_power_state(). + * In particular, the TG3_FLAG_IS_NIC flag must be + * determined before calling tg3_set_power_state() so that + * we know whether or not to switch out of Vaux power. + * When the flag is set, it means that GPIO1 is used for eeprom + * write protect and also implies that it is a LOM where GPIOs + * are not used to switch power. + */ + tg3_get_eeprom_hw_cfg(tp); + + if (tg3_flag(tp, ENABLE_APE)) { + /* Allow reads and writes to the + * APE register and memory space. + */ + pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR | + PCISTATE_ALLOW_APE_SHMEM_WR | + PCISTATE_ALLOW_APE_PSPACE_WR; + pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, + pci_state_reg); + + tg3_ape_lock_init(tp); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || + tg3_flag(tp, 57765_PLUS)) + tg3_flag_set(tp, CPMU_PRESENT); + + /* Set up tp->grc_local_ctrl before calling + * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high + * will bring 5700's external PHY out of reset. + * It is also used as eeprom write protect on LOMs. + */ + tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + tg3_flag(tp, EEPROM_WRITE_PROT)) + tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 | + GRC_LCLCTRL_GPIO_OUTPUT1); + /* Unused GPIO3 must be driven as output on 5752 because there + * are no pull-up resistors on unused GPIO pins. 
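+ * (GRC_LCLCTRL_GPIO_OE3, set below, configures GPIO3 as an output.)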
+ */ + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) + tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) + tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; + + if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) { + /* Turn off the debug UART. */ + tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; + if (tg3_flag(tp, IS_NIC)) + /* Keep VMain power. */ + tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 | + GRC_LCLCTRL_GPIO_OUTPUT0; + } + + /* Switch out of Vaux if it is a NIC */ + tg3_pwrsrc_switch_to_vmain(tp); + + /* Derive initial jumbo mode from MTU assigned in + * ether_setup() via the alloc_etherdev() call + */ + if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS)) + tg3_flag_set(tp, JUMBO_RING_ENABLE); + + /* Determine WakeOnLan speed to use. */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 || + tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) { + tg3_flag_clear(tp, WOL_SPEED_100MB); + } else { + tg3_flag_set(tp, WOL_SPEED_100MB); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + tp->phy_flags |= TG3_PHYFLG_IS_FET; + + /* A few boards don't want Ethernet@WireSpeed phy feature */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && + (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) && + (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) || + (tp->phy_flags & TG3_PHYFLG_IS_FET) || + (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) + tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED; + + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX || + GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX) + tp->phy_flags |= TG3_PHYFLG_ADC_BUG; + if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) + tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG; + + if (tg3_flag(tp, 5705_PLUS) && + !(tp->phy_flags & TG3_PHYFLG_IS_FET) && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 && + !tg3_flag(tp, 57765_PLUS)) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) { + if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 && + tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722) + tp->phy_flags |= TG3_PHYFLG_JITTER_BUG; + if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M) + tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM; + } else + tp->phy_flags |= TG3_PHYFLG_BER_BUG; + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && + GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) { + tp->phy_otp = tg3_read_otp_phycfg(tp); + if (tp->phy_otp == 0) + tp->phy_otp = TG3_OTP_DEFAULT; + } + + if (tg3_flag(tp, CPMU_PRESENT)) + tp->mi_mode = MAC_MI_MODE_500KHZ_CONST; + else + tp->mi_mode = MAC_MI_MODE_BASE; + + tp->coalesce_mode = 0; + if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX && + GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX) + tp->coalesce_mode |= HOSTCC_MODE_32BYTE; + + /* Set these bits to enable statistics workaround. 
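+ * (Applies to 5717 and to the A0 revisions of 5719 and 5720; both
+ * HOSTCC_MODE_ATTN and GRC_MODE_IRQ_ON_FLOW_ATTN are set below.)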
*/ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) { + tp->coalesce_mode |= HOSTCC_MODE_ATTN; + tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN; + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) + tg3_flag_set(tp, USE_PHYLIB); + + err = tg3_mdio_init(tp); + if (err) + return err; + + /* Initialize data/descriptor byte/word swapping. */ + val = tr32(GRC_MODE); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA | + GRC_MODE_WORD_SWAP_B2HRX_DATA | + GRC_MODE_B2HRX_ENABLE | + GRC_MODE_HTX2B_ENABLE | + GRC_MODE_HOST_STACKUP); + else + val &= GRC_MODE_HOST_STACKUP; + + tw32(GRC_MODE, val | tp->grc_mode); + + tg3_switch_clocks(tp); + + /* Clear this out for sanity. */ + tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); + + pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, + &pci_state_reg); + if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && + !tg3_flag(tp, PCIX_TARGET_HWBUG)) { + u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl); + + if (chiprevid == CHIPREV_ID_5701_A0 || + chiprevid == CHIPREV_ID_5701_B0 || + chiprevid == CHIPREV_ID_5701_B2 || + chiprevid == CHIPREV_ID_5701_B5) { + void __iomem *sram_base; + + /* Write some dummy words into the SRAM status block + * area, see if it reads back correctly. If the return + * value is bad, force enable the PCIX workaround. + */ + sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK; + + writel(0x00000000, sram_base); + writel(0x00000000, sram_base + 4); + writel(0xffffffff, sram_base + 4); + if (readl(sram_base) != 0x00000000) + tg3_flag_set(tp, PCIX_TARGET_HWBUG); + } + } + + udelay(50); + tg3_nvram_init(tp); + + grc_misc_cfg = tr32(GRC_MISC_CFG); + grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && + (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 || + grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M)) + tg3_flag_set(tp, IS_5788); + + if (!tg3_flag(tp, IS_5788) && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) + tg3_flag_set(tp, TAGGED_STATUS); + if (tg3_flag(tp, TAGGED_STATUS)) { + tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD | + HOSTCC_MODE_CLRTICK_TXBD); + + tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS; + pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, + tp->misc_host_ctrl); + } + + /* Preserve the APE MAC_MODE bits */ + if (tg3_flag(tp, ENABLE_APE)) + tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; + else + tp->mac_mode = 0; + + /* these are limited to 10/100 only */ + if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && + (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && + tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM && + (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 || + tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 || + tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) || + (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM && + (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F || + tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F || + tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 || + (tp->phy_flags & TG3_PHYFLG_IS_FET)) + tp->phy_flags |= TG3_PHYFLG_10_100_ONLY; + + err = tg3_phy_probe(tp); + if (err) { + dev_err(&tp->pdev->dev, "phy probe 
failed, err %d\n", err);
+ /* ... but do not return immediately ... */
+ tg3_mdio_fini(tp);
+ }
+
+ tg3_read_vpd(tp);
+ tg3_read_fw_ver(tp);
+
+ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
+ tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
+ } else {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
+ tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
+ else
+ tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
+ }
+
+ /* 5700 {AX,BX} chips have a broken status block link
+ * change bit implementation, so we must use the
+ * status register in those cases.
+ */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
+ tg3_flag_set(tp, USE_LINKCHG_REG);
+ else
+ tg3_flag_clear(tp, USE_LINKCHG_REG);
+
+ /* The led_ctrl is set during tg3_phy_probe; here we might
+ * have to force the link status polling mechanism based
+ * upon subsystem IDs.
+ */
+ if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
+ !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
+ tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
+ tg3_flag_set(tp, USE_LINKCHG_REG);
+ }
+
+ /* For all SERDES we poll the MAC status register. */
+ if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
+ tg3_flag_set(tp, POLL_SERDES);
+ else
+ tg3_flag_clear(tp, POLL_SERDES);
+
+ tp->rx_offset = NET_IP_ALIGN;
+ tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
+ tg3_flag(tp, PCIX_MODE)) {
+ tp->rx_offset = 0;
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ tp->rx_copy_thresh = ~(u16)0;
+#endif
+ }
+
+ tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
+ tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
+ tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
+
+ tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
+
+ /* Increment the rx prod index on the rx std ring by at most
+ * 8 for these chips to work around hw errata.
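+ * (tp->rx_std_max_post is capped at 8 below for 5750, 5752 and 5755.)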
+ */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) + tp->rx_std_max_post = 8; + + if (tg3_flag(tp, ASPM_WORKAROUND)) + tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) & + PCIE_PWR_MGMT_L1_THRESH_MSK; + + return err; +} + +#ifdef CONFIG_SPARC +static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp) +{ + struct net_device *dev = tp->dev; + struct pci_dev *pdev = tp->pdev; + struct device_node *dp = pci_device_to_OF_node(pdev); + const unsigned char *addr; + int len; + + addr = of_get_property(dp, "local-mac-address", &len); + if (addr && len == 6) { + memcpy(dev->dev_addr, addr, 6); + memcpy(dev->perm_addr, dev->dev_addr, 6); + return 0; + } + return -ENODEV; +} + +static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp) +{ + struct net_device *dev = tp->dev; + + memcpy(dev->dev_addr, idprom->id_ethaddr, 6); + memcpy(dev->perm_addr, idprom->id_ethaddr, 6); + return 0; +} +#endif + +static int __devinit tg3_get_device_address(struct tg3 *tp) +{ + struct net_device *dev = tp->dev; + u32 hi, lo, mac_offset; + int addr_ok = 0; + +#ifdef CONFIG_SPARC + if (!tg3_get_macaddr_sparc(tp)) + return 0; +#endif + + mac_offset = 0x7c; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || + tg3_flag(tp, 5780_CLASS)) { + if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) + mac_offset = 0xcc; + if (tg3_nvram_lock(tp)) + tw32_f(NVRAM_CMD, NVRAM_CMD_RESET); + else + tg3_nvram_unlock(tp); + } else if (tg3_flag(tp, 5717_PLUS)) { + if (tp->pci_fn & 1) + mac_offset = 0xcc; + if (tp->pci_fn > 1) + mac_offset += 0x18c; + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + mac_offset = 0x10; + + /* First try to get it from MAC address mailbox. */ + tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi); + if ((hi >> 16) == 0x484b) { + dev->dev_addr[0] = (hi >> 8) & 0xff; + dev->dev_addr[1] = (hi >> 0) & 0xff; + + tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo); + dev->dev_addr[2] = (lo >> 24) & 0xff; + dev->dev_addr[3] = (lo >> 16) & 0xff; + dev->dev_addr[4] = (lo >> 8) & 0xff; + dev->dev_addr[5] = (lo >> 0) & 0xff; + + /* Some old bootcode may report a 0 MAC address in SRAM */ + addr_ok = is_valid_ether_addr(&dev->dev_addr[0]); + } + if (!addr_ok) { + /* Next, try NVRAM. */ + if (!tg3_flag(tp, NO_NVRAM) && + !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) && + !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) { + memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2); + memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo)); + } + /* Finally just fetch it out of the MAC control regs. 
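+ * (MAC_ADDR_0_HIGH/LOW are read and unpacked a byte at a time below.)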
*/ + else { + hi = tr32(MAC_ADDR_0_HIGH); + lo = tr32(MAC_ADDR_0_LOW); + + dev->dev_addr[5] = lo & 0xff; + dev->dev_addr[4] = (lo >> 8) & 0xff; + dev->dev_addr[3] = (lo >> 16) & 0xff; + dev->dev_addr[2] = (lo >> 24) & 0xff; + dev->dev_addr[1] = hi & 0xff; + dev->dev_addr[0] = (hi >> 8) & 0xff; + } + } + + if (!is_valid_ether_addr(&dev->dev_addr[0])) { +#ifdef CONFIG_SPARC + if (!tg3_get_default_macaddr_sparc(tp)) + return 0; +#endif + return -EINVAL; + } + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); + return 0; +} + +#define BOUNDARY_SINGLE_CACHELINE 1 +#define BOUNDARY_MULTI_CACHELINE 2 + +static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val) +{ + int cacheline_size; + u8 byte; + int goal; + + pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte); + if (byte == 0) + cacheline_size = 1024; + else + cacheline_size = (int) byte * 4; + + /* On 5703 and later chips, the boundary bits have no + * effect. + */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && + !tg3_flag(tp, PCI_EXPRESS)) + goto out; + +#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC) + goal = BOUNDARY_MULTI_CACHELINE; +#else +#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA) + goal = BOUNDARY_SINGLE_CACHELINE; +#else + goal = 0; +#endif +#endif + + if (tg3_flag(tp, 57765_PLUS)) { + val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT; + goto out; + } + + if (!goal) + goto out; + + /* PCI controllers on most RISC systems tend to disconnect + * when a device tries to burst across a cache-line boundary. + * Therefore, letting tg3 do so just wastes PCI bandwidth. + * + * Unfortunately, for PCI-E there are only limited + * write-side controls for this, and thus for reads + * we will still get the disconnects. We'll also waste + * these PCI cycles for both read and write for chips + * other than 5700 and 5701 which do not implement the + * boundary bits. 
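+ * (The switch below chooses the read/write boundary bits per bus
+ * type and cache-line size.)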
+ */ + if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) { + switch (cacheline_size) { + case 16: + case 32: + case 64: + case 128: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX | + DMA_RWCTRL_WRITE_BNDRY_128_PCIX); + } else { + val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | + DMA_RWCTRL_WRITE_BNDRY_384_PCIX); + } + break; + + case 256: + val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX | + DMA_RWCTRL_WRITE_BNDRY_256_PCIX); + break; + + default: + val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX | + DMA_RWCTRL_WRITE_BNDRY_384_PCIX); + break; + } + } else if (tg3_flag(tp, PCI_EXPRESS)) { + switch (cacheline_size) { + case 16: + case 32: + case 64: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; + val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE; + break; + } + /* fallthrough */ + case 128: + default: + val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE; + val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE; + break; + } + } else { + switch (cacheline_size) { + case 16: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val |= (DMA_RWCTRL_READ_BNDRY_16 | + DMA_RWCTRL_WRITE_BNDRY_16); + break; + } + /* fallthrough */ + case 32: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val |= (DMA_RWCTRL_READ_BNDRY_32 | + DMA_RWCTRL_WRITE_BNDRY_32); + break; + } + /* fallthrough */ + case 64: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val |= (DMA_RWCTRL_READ_BNDRY_64 | + DMA_RWCTRL_WRITE_BNDRY_64); + break; + } + /* fallthrough */ + case 128: + if (goal == BOUNDARY_SINGLE_CACHELINE) { + val |= (DMA_RWCTRL_READ_BNDRY_128 | + DMA_RWCTRL_WRITE_BNDRY_128); + break; + } + /* fallthrough */ + case 256: + val |= (DMA_RWCTRL_READ_BNDRY_256 | + DMA_RWCTRL_WRITE_BNDRY_256); + break; + case 512: + val |= (DMA_RWCTRL_READ_BNDRY_512 | + DMA_RWCTRL_WRITE_BNDRY_512); + break; + case 1024: + default: + val |= (DMA_RWCTRL_READ_BNDRY_1024 | + DMA_RWCTRL_WRITE_BNDRY_1024); + break; + } + } + +out: + return val; +} + +static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device) +{ + struct tg3_internal_buffer_desc test_desc; + u32 sram_dma_descs; + int i, ret; + + sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE; + + tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0); + tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0); + tw32(RDMAC_STATUS, 0); + tw32(WDMAC_STATUS, 0); + + tw32(BUFMGR_MODE, 0); + tw32(FTQ_RESET, 0); + + test_desc.addr_hi = ((u64) buf_dma) >> 32; + test_desc.addr_lo = buf_dma & 0xffffffff; + test_desc.nic_mbuf = 0x00002100; + test_desc.len = size; + + /* + * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz + * the *second* time the tg3 driver was getting loaded after an + * initial scan. + * + * Broadcom tells me: + * ...the DMA engine is connected to the GRC block and a DMA + * reset may affect the GRC block in some unpredictable way... + * The behavior of resets to individual blocks has not been tested. + * + * Broadcom noted the GRC reset will also reset all sub-components. 
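+ * (Presumably for that reason the FTQ and DMA status registers are
+ * cleared individually above rather than via a block reset.)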
+ */ + if (to_device) { + test_desc.cqid_sqid = (13 << 8) | 2; + + tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE); + udelay(40); + } else { + test_desc.cqid_sqid = (16 << 8) | 7; + + tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE); + udelay(40); + } + test_desc.flags = 0x00000005; + + for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) { + u32 val; + + val = *(((u32 *)&test_desc) + i); + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, + sram_dma_descs + (i * sizeof(u32))); + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val); + } + pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); + + if (to_device) + tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs); + else + tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs); + + ret = -ENODEV; + for (i = 0; i < 40; i++) { + u32 val; + + if (to_device) + val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ); + else + val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ); + if ((val & 0xffff) == sram_dma_descs) { + ret = 0; + break; + } + + udelay(100); + } + + return ret; +} + +#define TEST_BUFFER_SIZE 0x2000 + +static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = { + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) }, + { }, +}; + +static int __devinit tg3_test_dma(struct tg3 *tp) +{ + dma_addr_t buf_dma; + u32 *buf, saved_dma_rwctrl; + int ret = 0; + + buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, + &buf_dma, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto out_nofree; + } + + tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) | + (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT)); + + tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); + + if (tg3_flag(tp, 57765_PLUS)) + goto out; + + if (tg3_flag(tp, PCI_EXPRESS)) { + /* DMA read watermark not used on PCIE */ + tp->dma_rwctrl |= 0x00180000; + } else if (!tg3_flag(tp, PCIX_MODE)) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) + tp->dma_rwctrl |= 0x003f0000; + else + tp->dma_rwctrl |= 0x003f000f; + } else { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { + u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f); + u32 read_water = 0x7; + + /* If the 5704 is behind the EPB bridge, we can + * do the less restrictive ONE_DMA workaround for + * better performance. + */ + if (tg3_flag(tp, 40BIT_DMA_BUG) && + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) + tp->dma_rwctrl |= 0x8000; + else if (ccval == 0x6 || ccval == 0x7) + tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) + read_water = 4; + /* Set bit 23 to enable PCIX hw bug fix */ + tp->dma_rwctrl |= + (read_water << DMA_RWCTRL_READ_WATER_SHIFT) | + (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) | + (1 << 23); + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) { + /* 5780 always in PCIX mode */ + tp->dma_rwctrl |= 0x00144000; + } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { + /* 5714 always in PCIX mode */ + tp->dma_rwctrl |= 0x00148000; + } else { + tp->dma_rwctrl |= 0x001b000f; + } + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) + tp->dma_rwctrl &= 0xfffffff0; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + /* Remove this if it causes problems for some boards. */ + tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT; + + /* On 5700/5701 chips, we need to set this bit. 
+ * Otherwise the chip will issue cacheline transactions + * to streamable DMA memory with not all the byte + * enables turned on. This is an error on several + * RISC PCI controllers, in particular sparc64. + * + * On 5703/5704 chips, this bit has been reassigned + * a different meaning. In particular, it is used + * on those chips to enable a PCI-X workaround. + */ + tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE; + } + + tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); + +#if 0 + /* Unneeded, already done by tg3_get_invariants. */ + tg3_switch_clocks(tp); +#endif + + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) + goto out; + + /* It is best to perform DMA test with maximum write burst size + * to expose the 5700/5701 write DMA bug. + */ + saved_dma_rwctrl = tp->dma_rwctrl; + tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; + tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); + + while (1) { + u32 *p = buf, i; + + for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) + p[i] = i; + + /* Send the buffer to the chip. */ + ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1); + if (ret) { + dev_err(&tp->pdev->dev, + "%s: Buffer write failed. err = %d\n", + __func__, ret); + break; + } + +#if 0 + /* validate data reached card RAM correctly. */ + for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) { + u32 val; + tg3_read_mem(tp, 0x2100 + (i*4), &val); + if (le32_to_cpu(val) != p[i]) { + dev_err(&tp->pdev->dev, + "%s: Buffer corrupted on device! " + "(%d != %d)\n", __func__, val, i); + /* ret = -ENODEV here? */ + } + p[i] = 0; + } +#endif + /* Now read it back. */ + ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0); + if (ret) { + dev_err(&tp->pdev->dev, "%s: Buffer read failed. " + "err = %d\n", __func__, ret); + break; + } + + /* Verify it. */ + for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) { + if (p[i] == i) + continue; + + if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != + DMA_RWCTRL_WRITE_BNDRY_16) { + tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; + tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; + tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); + break; + } else { + dev_err(&tp->pdev->dev, + "%s: Buffer corrupted on read back! " + "(%d != %d)\n", __func__, p[i], i); + ret = -ENODEV; + goto out; + } + } + + if (i == (TEST_BUFFER_SIZE / sizeof(u32))) { + /* Success. */ + ret = 0; + break; + } + } + if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != + DMA_RWCTRL_WRITE_BNDRY_16) { + /* DMA test passed without adjusting DMA boundary, + * now look for chipsets that are known to expose the + * DMA bug without failing the test. + */ + if (pci_dev_present(tg3_dma_wait_state_chipsets)) { + tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; + tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; + } else { + /* Safe to use the calculated DMA boundary. 
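+ * (tp->dma_rwctrl is restored below to the value saved before the
+ * maximum-burst test.)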
*/ + tp->dma_rwctrl = saved_dma_rwctrl; + } + + tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); + } + +out: + dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma); +out_nofree: + return ret; +} + +static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) +{ + if (tg3_flag(tp, 57765_PLUS)) { + tp->bufmgr_config.mbuf_read_dma_low_water = + DEFAULT_MB_RDMA_LOW_WATER_5705; + tp->bufmgr_config.mbuf_mac_rx_low_water = + DEFAULT_MB_MACRX_LOW_WATER_57765; + tp->bufmgr_config.mbuf_high_water = + DEFAULT_MB_HIGH_WATER_57765; + + tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = + DEFAULT_MB_RDMA_LOW_WATER_5705; + tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = + DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765; + tp->bufmgr_config.mbuf_high_water_jumbo = + DEFAULT_MB_HIGH_WATER_JUMBO_57765; + } else if (tg3_flag(tp, 5705_PLUS)) { + tp->bufmgr_config.mbuf_read_dma_low_water = + DEFAULT_MB_RDMA_LOW_WATER_5705; + tp->bufmgr_config.mbuf_mac_rx_low_water = + DEFAULT_MB_MACRX_LOW_WATER_5705; + tp->bufmgr_config.mbuf_high_water = + DEFAULT_MB_HIGH_WATER_5705; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + tp->bufmgr_config.mbuf_mac_rx_low_water = + DEFAULT_MB_MACRX_LOW_WATER_5906; + tp->bufmgr_config.mbuf_high_water = + DEFAULT_MB_HIGH_WATER_5906; + } + + tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = + DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780; + tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = + DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780; + tp->bufmgr_config.mbuf_high_water_jumbo = + DEFAULT_MB_HIGH_WATER_JUMBO_5780; + } else { + tp->bufmgr_config.mbuf_read_dma_low_water = + DEFAULT_MB_RDMA_LOW_WATER; + tp->bufmgr_config.mbuf_mac_rx_low_water = + DEFAULT_MB_MACRX_LOW_WATER; + tp->bufmgr_config.mbuf_high_water = + DEFAULT_MB_HIGH_WATER; + + tp->bufmgr_config.mbuf_read_dma_low_water_jumbo = + DEFAULT_MB_RDMA_LOW_WATER_JUMBO; + tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo = + DEFAULT_MB_MACRX_LOW_WATER_JUMBO; + tp->bufmgr_config.mbuf_high_water_jumbo = + DEFAULT_MB_HIGH_WATER_JUMBO; + } + + tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER; + tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER; +} + +static char * __devinit tg3_phy_string(struct tg3 *tp) +{ + switch (tp->phy_id & TG3_PHY_ID_MASK) { + case TG3_PHY_ID_BCM5400: return "5400"; + case TG3_PHY_ID_BCM5401: return "5401"; + case TG3_PHY_ID_BCM5411: return "5411"; + case TG3_PHY_ID_BCM5701: return "5701"; + case TG3_PHY_ID_BCM5703: return "5703"; + case TG3_PHY_ID_BCM5704: return "5704"; + case TG3_PHY_ID_BCM5705: return "5705"; + case TG3_PHY_ID_BCM5750: return "5750"; + case TG3_PHY_ID_BCM5752: return "5752"; + case TG3_PHY_ID_BCM5714: return "5714"; + case TG3_PHY_ID_BCM5780: return "5780"; + case TG3_PHY_ID_BCM5755: return "5755"; + case TG3_PHY_ID_BCM5787: return "5787"; + case TG3_PHY_ID_BCM5784: return "5784"; + case TG3_PHY_ID_BCM5756: return "5722/5756"; + case TG3_PHY_ID_BCM5906: return "5906"; + case TG3_PHY_ID_BCM5761: return "5761"; + case TG3_PHY_ID_BCM5718C: return "5718C"; + case TG3_PHY_ID_BCM5718S: return "5718S"; + case TG3_PHY_ID_BCM57765: return "57765"; + case TG3_PHY_ID_BCM5719C: return "5719C"; + case TG3_PHY_ID_BCM5720C: return "5720C"; + case TG3_PHY_ID_BCM8002: return "8002/serdes"; + case 0: return "serdes"; + default: return "unknown"; + } +} + +static char * __devinit tg3_bus_string(struct tg3 *tp, char *str) +{ + if (tg3_flag(tp, PCI_EXPRESS)) { + strcpy(str, "PCI Express"); + return str; + } else if (tg3_flag(tp, PCIX_MODE)) { + u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f; + + strcpy(str, 
"PCIX:"); + + if ((clock_ctrl == 7) || + ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) == + GRC_MISC_CFG_BOARD_ID_5704CIOBE)) + strcat(str, "133MHz"); + else if (clock_ctrl == 0) + strcat(str, "33MHz"); + else if (clock_ctrl == 2) + strcat(str, "50MHz"); + else if (clock_ctrl == 4) + strcat(str, "66MHz"); + else if (clock_ctrl == 6) + strcat(str, "100MHz"); + } else { + strcpy(str, "PCI:"); + if (tg3_flag(tp, PCI_HIGH_SPEED)) + strcat(str, "66MHz"); + else + strcat(str, "33MHz"); + } + if (tg3_flag(tp, PCI_32BIT)) + strcat(str, ":32-bit"); + else + strcat(str, ":64-bit"); + return str; +} + +static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp) +{ + struct pci_dev *peer; + unsigned int func, devnr = tp->pdev->devfn & ~7; + + for (func = 0; func < 8; func++) { + peer = pci_get_slot(tp->pdev->bus, devnr | func); + if (peer && peer != tp->pdev) + break; + pci_dev_put(peer); + } + /* 5704 can be configured in single-port mode, set peer to + * tp->pdev in that case. + */ + if (!peer) { + peer = tp->pdev; + return peer; + } + + /* + * We don't need to keep the refcount elevated; there's no way + * to remove one half of this device without removing the other + */ + pci_dev_put(peer); + + return peer; +} + +static void __devinit tg3_init_coal(struct tg3 *tp) +{ + struct ethtool_coalesce *ec = &tp->coal; + + memset(ec, 0, sizeof(*ec)); + ec->cmd = ETHTOOL_GCOALESCE; + ec->rx_coalesce_usecs = LOW_RXCOL_TICKS; + ec->tx_coalesce_usecs = LOW_TXCOL_TICKS; + ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES; + ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES; + ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT; + ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT; + ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT; + ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT; + ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS; + + if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD | + HOSTCC_MODE_CLRTICK_TXBD)) { + ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS; + ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS; + ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS; + ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS; + } + + if (tg3_flag(tp, 5705_PLUS)) { + ec->rx_coalesce_usecs_irq = 0; + ec->tx_coalesce_usecs_irq = 0; + ec->stats_block_coalesce_usecs = 0; + } +} + +static const struct net_device_ops tg3_netdev_ops = { + .ndo_open = tg3_open, + .ndo_stop = tg3_close, + .ndo_start_xmit = tg3_start_xmit, + .ndo_get_stats64 = tg3_get_stats64, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = tg3_set_rx_mode, + .ndo_set_mac_address = tg3_set_mac_addr, + .ndo_do_ioctl = tg3_ioctl, + .ndo_tx_timeout = tg3_tx_timeout, + .ndo_change_mtu = tg3_change_mtu, + .ndo_fix_features = tg3_fix_features, + .ndo_set_features = tg3_set_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = tg3_poll_controller, +#endif +}; + +static int __devinit tg3_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *dev; + struct tg3 *tp; + int i, err, pm_cap; + u32 sndmbx, rcvmbx, intmbx; + char str[40]; + u64 dma_mask, persist_dma_mask; + u32 features = 0; + + printk_once(KERN_INFO "%s\n", version); + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); + return err; + } + + err = pci_request_regions(pdev, DRV_MODULE_NAME); + if (err) { + dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); + goto err_out_disable_pdev; + } + + pci_set_master(pdev); + + /* Find 
power-management capability. */ + pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); + if (pm_cap == 0) { + dev_err(&pdev->dev, + "Cannot find Power Management capability, aborting\n"); + err = -EIO; + goto err_out_free_res; + } + + err = pci_set_power_state(pdev, PCI_D0); + if (err) { + dev_err(&pdev->dev, "Transition to D0 failed, aborting\n"); + goto err_out_free_res; + } + + dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS); + if (!dev) { + dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n"); + err = -ENOMEM; + goto err_out_power_down; + } + + SET_NETDEV_DEV(dev, &pdev->dev); + + tp = netdev_priv(dev); + tp->pdev = pdev; + tp->dev = dev; + tp->pm_cap = pm_cap; + tp->rx_mode = TG3_DEF_RX_MODE; + tp->tx_mode = TG3_DEF_TX_MODE; + + if (tg3_debug > 0) + tp->msg_enable = tg3_debug; + else + tp->msg_enable = TG3_DEF_MSG_ENABLE; + + /* The word/byte swap controls here control register access byte + * swapping. DMA data byte swapping is controlled in the GRC_MODE + * setting below. + */ + tp->misc_host_ctrl = + MISC_HOST_CTRL_MASK_PCI_INT | + MISC_HOST_CTRL_WORD_SWAP | + MISC_HOST_CTRL_INDIR_ACCESS | + MISC_HOST_CTRL_PCISTATE_RW; + + /* The NONFRM (non-frame) byte/word swap controls take effect + * on descriptor entries, anything which isn't packet data. + * + * The StrongARM chips on the board (one for tx, one for rx) + * are running in big-endian mode. + */ + tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA | + GRC_MODE_WSWAP_NONFRM_DATA); +#ifdef __BIG_ENDIAN + tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA; +#endif + spin_lock_init(&tp->lock); + spin_lock_init(&tp->indirect_lock); + INIT_WORK(&tp->reset_task, tg3_reset_task); + + tp->regs = pci_ioremap_bar(pdev, BAR_0); + if (!tp->regs) { + dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); + err = -ENOMEM; + goto err_out_free_dev; + } + + if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 || + tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || + tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) { + tg3_flag_set(tp, ENABLE_APE); + tp->aperegs = pci_ioremap_bar(pdev, BAR_2); + if (!tp->aperegs) { + dev_err(&pdev->dev, + "Cannot map APE registers, aborting\n"); + err = -ENOMEM; + goto err_out_iounmap; + } + } + + tp->rx_pending = TG3_DEF_RX_RING_PENDING; + tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; + + dev->ethtool_ops = &tg3_ethtool_ops; + dev->watchdog_timeo = TG3_TX_TIMEOUT; + dev->netdev_ops = &tg3_netdev_ops; + dev->irq = pdev->irq; + + err = tg3_get_invariants(tp); + if (err) { + dev_err(&pdev->dev, + "Problem fetching invariants of chip, aborting\n"); + goto err_out_apeunmap; + } + + /* The EPB bridge inside 5714, 5715, and 5780 and any + * device behind the EPB cannot support DMA addresses > 40-bit. + * On 64-bit systems with IOMMU, use 40-bit dma_mask. + * On 64-bit systems without IOMMU, use 64-bit dma_mask and + * do DMA address check in tg3_start_xmit(). + */ + if (tg3_flag(tp, IS_5788)) + persist_dma_mask = dma_mask = DMA_BIT_MASK(32); + else if (tg3_flag(tp, 40BIT_DMA_BUG)) { + persist_dma_mask = dma_mask = DMA_BIT_MASK(40); +#ifdef CONFIG_HIGHMEM + dma_mask = DMA_BIT_MASK(64); +#endif + } else + persist_dma_mask = dma_mask = DMA_BIT_MASK(64); + + /* Configure DMA attributes. 
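+ * (Try the 64-bit or 40-bit mask first; fall back to 32-bit if the
+ * kernel rejects it.)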
*/
+ if (dma_mask > DMA_BIT_MASK(32)) {
+ err = pci_set_dma_mask(pdev, dma_mask);
+ if (!err) {
+ features |= NETIF_F_HIGHDMA;
+ err = pci_set_consistent_dma_mask(pdev,
+ persist_dma_mask);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Unable to obtain 64 bit "
+ "DMA for consistent allocations\n");
+ goto err_out_apeunmap;
+ }
+ }
+ }
+ if (err || dma_mask == DMA_BIT_MASK(32)) {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_out_apeunmap;
+ }
+ }
+
+ tg3_init_bufmgr_config(tp);
+
+ features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+
+ /* 5700 B0 chips do not support checksumming correctly due
+ * to hardware bugs.
+ */
+ if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
+ features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
+
+ if (tg3_flag(tp, 5755_PLUS))
+ features |= NETIF_F_IPV6_CSUM;
+ }
+
+ /* TSO is on by default on chips that support hardware TSO.
+ * Firmware TSO on older chips gives lower performance, so it
+ * is off by default, but can be enabled using ethtool.
+ */
+ if ((tg3_flag(tp, HW_TSO_1) ||
+ tg3_flag(tp, HW_TSO_2) ||
+ tg3_flag(tp, HW_TSO_3)) &&
+ (features & NETIF_F_IP_CSUM))
+ features |= NETIF_F_TSO;
+ if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
+ if (features & NETIF_F_IPV6_CSUM)
+ features |= NETIF_F_TSO6;
+ if (tg3_flag(tp, HW_TSO_3) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
+ GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ features |= NETIF_F_TSO_ECN;
+ }
+
+ dev->features |= features;
+ dev->vlan_features |= features;
+
+ /*
+ * Add loopback capability only for a subset of devices that support
+ * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
+ * loopback for the remaining devices.
+ */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
+ !tg3_flag(tp, CPMU_PRESENT))
+ /* Add the loopback capability */
+ features |= NETIF_F_LOOPBACK;
+
+ dev->hw_features |= features;
+
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
+ !tg3_flag(tp, TSO_CAPABLE) &&
+ !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
+ tg3_flag_set(tp, MAX_RXPEND_64);
+ tp->rx_pending = 63;
+ }
+
+ err = tg3_get_device_address(tp);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Could not obtain valid ethernet address, aborting\n");
+ goto err_out_apeunmap;
+ }
+
+ /*
+ * Reset chip in case UNDI or EFI driver did not shut it down;
+ * the DMA self test will enable WDMAC and we'll see (spurious)
+ * pending DMA on the PCI bus at that point.
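+ * (Detected below by checking HOSTCC_MODE_ENABLE and
+ * WDMAC_MODE_ENABLE; the chip is halted first if either is set.)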
+ */ + if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) || + (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { + tw32(MEMARB_MODE, MEMARB_MODE_ENABLE); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + } + + err = tg3_test_dma(tp); + if (err) { + dev_err(&pdev->dev, "DMA engine test failed, aborting\n"); + goto err_out_apeunmap; + } + + intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; + rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; + sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; + for (i = 0; i < tp->irq_max; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; + + tnapi->tp = tp; + tnapi->tx_pending = TG3_DEF_TX_RING_PENDING; + + tnapi->int_mbox = intmbx; + if (i <= 4) + intmbx += 0x8; + else + intmbx += 0x4; + + tnapi->consmbox = rcvmbx; + tnapi->prodmbox = sndmbx; + + if (i) + tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1); + else + tnapi->coal_now = HOSTCC_MODE_NOW; + + if (!tg3_flag(tp, SUPPORT_MSIX)) + break; + + /* + * If we support MSIX, we'll be using RSS. If we're using + * RSS, the first vector only handles link interrupts and the + * remaining vectors handle rx and tx interrupts. Reuse the + * mailbox values for the next iteration. The values we setup + * above are still useful for the single vectored mode. + */ + if (!i) + continue; + + rcvmbx += 0x8; + + if (sndmbx & 0x4) + sndmbx -= 0x4; + else + sndmbx += 0xc; + } + + tg3_init_coal(tp); + + pci_set_drvdata(pdev, dev); + + if (tg3_flag(tp, 5717_PLUS)) { + /* Resume a low-power mode */ + tg3_frob_aux_power(tp, false); + } + + err = register_netdev(dev); + if (err) { + dev_err(&pdev->dev, "Cannot register net device, aborting\n"); + goto err_out_apeunmap; + } + + netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n", + tp->board_part_number, + tp->pci_chip_rev_id, + tg3_bus_string(tp, str), + dev->dev_addr); + + if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { + struct phy_device *phydev; + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + netdev_info(dev, + "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", + phydev->drv->name, dev_name(&phydev->dev)); + } else { + char *ethtype; + + if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) + ethtype = "10/100Base-TX"; + else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) + ethtype = "1000Base-SX"; + else + ethtype = "10/100/1000Base-T"; + + netdev_info(dev, "attached PHY is %s (%s Ethernet) " + "(WireSpeed[%d], EEE[%d])\n", + tg3_phy_string(tp), ethtype, + (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0, + (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0); + } + + netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n", + (dev->features & NETIF_F_RXCSUM) != 0, + tg3_flag(tp, USE_LINKCHG_REG) != 0, + (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0, + tg3_flag(tp, ENABLE_ASF) != 0, + tg3_flag(tp, TSO_CAPABLE) != 0); + netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n", + tp->dma_rwctrl, + pdev->dma_mask == DMA_BIT_MASK(32) ? 32 : + ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 
40 : 64); + + pci_save_state(pdev); + + return 0; + +err_out_apeunmap: + if (tp->aperegs) { + iounmap(tp->aperegs); + tp->aperegs = NULL; + } + +err_out_iounmap: + if (tp->regs) { + iounmap(tp->regs); + tp->regs = NULL; + } + +err_out_free_dev: + free_netdev(dev); + +err_out_power_down: + pci_set_power_state(pdev, PCI_D3hot); + +err_out_free_res: + pci_release_regions(pdev); + +err_out_disable_pdev: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + return err; +} + +static void __devexit tg3_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + if (dev) { + struct tg3 *tp = netdev_priv(dev); + + if (tp->fw) + release_firmware(tp->fw); + + cancel_work_sync(&tp->reset_task); + - if (!tg3_flag(tp, USE_PHYLIB)) { ++ if (tg3_flag(tp, USE_PHYLIB)) { + tg3_phy_fini(tp); + tg3_mdio_fini(tp); + } + + unregister_netdev(dev); + if (tp->aperegs) { + iounmap(tp->aperegs); + tp->aperegs = NULL; + } + if (tp->regs) { + iounmap(tp->regs); + tp->regs = NULL; + } + free_netdev(dev); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + } +} + +#ifdef CONFIG_PM_SLEEP +static int tg3_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct tg3 *tp = netdev_priv(dev); + int err; + + if (!netif_running(dev)) + return 0; + + flush_work_sync(&tp->reset_task); + tg3_phy_stop(tp); + tg3_netif_stop(tp); + + del_timer_sync(&tp->timer); + + tg3_full_lock(tp, 1); + tg3_disable_ints(tp); + tg3_full_unlock(tp); + + netif_device_detach(dev); + + tg3_full_lock(tp, 0); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + tg3_flag_clear(tp, INIT_COMPLETE); + tg3_full_unlock(tp); + + err = tg3_power_down_prepare(tp); + if (err) { + int err2; + + tg3_full_lock(tp, 0); + + tg3_flag_set(tp, INIT_COMPLETE); + err2 = tg3_restart_hw(tp, 1); + if (err2) + goto out; + + tp->timer.expires = jiffies + tp->timer_offset; + add_timer(&tp->timer); + + netif_device_attach(dev); + tg3_netif_start(tp); + +out: + tg3_full_unlock(tp); + + if (!err2) + tg3_phy_start(tp); + } + + return err; +} + +static int tg3_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct tg3 *tp = netdev_priv(dev); + int err; + + if (!netif_running(dev)) + return 0; + + netif_device_attach(dev); + + tg3_full_lock(tp, 0); + + tg3_flag_set(tp, INIT_COMPLETE); + err = tg3_restart_hw(tp, 1); + if (err) + goto out; + + tp->timer.expires = jiffies + tp->timer_offset; + add_timer(&tp->timer); + + tg3_netif_start(tp); + +out: + tg3_full_unlock(tp); + + if (!err) + tg3_phy_start(tp); + + return err; +} + +static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume); +#define TG3_PM_OPS (&tg3_pm_ops) + +#else + +#define TG3_PM_OPS NULL + +#endif /* CONFIG_PM_SLEEP */ + +/** + * tg3_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. 
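+ *
+ * It stops the interface, cancels any pending reset task and halts
+ * the chip so the PCI error recovery core can reset the slot.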
+ */
+static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct tg3 *tp = netdev_priv(netdev);
+ pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
+
+ netdev_info(netdev, "PCI I/O error detected\n");
+
+ rtnl_lock();
+
+ if (!netif_running(netdev))
+ goto done;
+
+ tg3_phy_stop(tp);
+
+ tg3_netif_stop(tp);
+
+ del_timer_sync(&tp->timer);
+ tg3_flag_clear(tp, RESTART_TIMER);
+
+ /* Want to make sure that the reset task doesn't run */
+ cancel_work_sync(&tp->reset_task);
+ tg3_flag_clear(tp, TX_RECOVERY_PENDING);
+ tg3_flag_clear(tp, RESTART_TIMER);
+
+ netif_device_detach(netdev);
+
+ /* Clean up software state, even if MMIO is blocked */
+ tg3_full_lock(tp, 0);
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
+ tg3_full_unlock(tp);
+
+done:
+ if (state == pci_channel_io_perm_failure)
+ err = PCI_ERS_RESULT_DISCONNECT;
+ else
+ pci_disable_device(pdev);
+
+ rtnl_unlock();
+
+ return err;
+}
+
+/**
+ * tg3_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.
+ * At this point, the card has experienced a hard reset,
+ * followed by fixups by BIOS, and has its config space
+ * set up identically to what it was at cold boot.
+ */
+static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct tg3 *tp = netdev_priv(netdev);
+ pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
+ int err;
+
+ rtnl_lock();
+
+ if (pci_enable_device(pdev)) {
+ netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
+ goto done;
+ }
+
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ if (!netif_running(netdev)) {
+ rc = PCI_ERS_RESULT_RECOVERED;
+ goto done;
+ }
+
+ err = tg3_power_up(tp);
+ if (err)
+ goto done;
+
+ rc = PCI_ERS_RESULT_RECOVERED;
+
+done:
+ rtnl_unlock();
+
+ return rc;
+}
+
+/**
+ * tg3_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells
+ * us that it's OK to resume normal operation.
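+ *
+ * It restarts the hardware, re-attaches the interface, re-arms the
+ * driver timer and restarts the PHY.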
+ */ +static void tg3_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct tg3 *tp = netdev_priv(netdev); + int err; + + rtnl_lock(); + + if (!netif_running(netdev)) + goto done; + + tg3_full_lock(tp, 0); + tg3_flag_set(tp, INIT_COMPLETE); + err = tg3_restart_hw(tp, 1); + tg3_full_unlock(tp); + if (err) { + netdev_err(netdev, "Cannot restart hardware after reset.\n"); + goto done; + } + + netif_device_attach(netdev); + + tp->timer.expires = jiffies + tp->timer_offset; + add_timer(&tp->timer); + + tg3_netif_start(tp); + + tg3_phy_start(tp); + +done: + rtnl_unlock(); +} + +static struct pci_error_handlers tg3_err_handler = { + .error_detected = tg3_io_error_detected, + .slot_reset = tg3_io_slot_reset, + .resume = tg3_io_resume +}; + +static struct pci_driver tg3_driver = { + .name = DRV_MODULE_NAME, + .id_table = tg3_pci_tbl, + .probe = tg3_init_one, + .remove = __devexit_p(tg3_remove_one), + .err_handler = &tg3_err_handler, + .driver.pm = TG3_PM_OPS, +}; + +static int __init tg3_init(void) +{ + return pci_register_driver(&tg3_driver); +} + +static void __exit tg3_cleanup(void) +{ + pci_unregister_driver(&tg3_driver); +} + +module_init(tg3_init); +module_exit(tg3_cleanup); diff --cc drivers/net/ethernet/jme.c index 7a0c746f2749,000000000000..7becff1f387d mode 100644,000000..100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@@ -1,3236 -1,0 +1,3242 @@@ +/* + * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver + * + * Copyright 2008 JMicron Technology Corporation + * http://www.jmicron.com/ + * Copyright (c) 2009 - 2010 Guo-Fu Tseng + * + * Author: Guo-Fu Tseng + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "jme.h" + +static int force_pseudohp = -1; +static int no_pseudohp = -1; +static int no_extplug = -1; +module_param(force_pseudohp, int, 0); +MODULE_PARM_DESC(force_pseudohp, + "Enable pseudo hot-plug feature manually by driver instead of BIOS."); +module_param(no_pseudohp, int, 0); +MODULE_PARM_DESC(no_pseudohp, "Disable pseudo hot-plug feature."); +module_param(no_extplug, int, 0); +MODULE_PARM_DESC(no_extplug, + "Do not use external plug signal for pseudo hot-plug."); + +static int +jme_mdio_read(struct net_device *netdev, int phy, int reg) +{ + struct jme_adapter *jme = netdev_priv(netdev); + int i, val, again = (reg == MII_BMSR) ? 
1 : 0; + +read_again: + jwrite32(jme, JME_SMI, SMI_OP_REQ | + smi_phy_addr(phy) | + smi_reg_addr(reg)); + + wmb(); + for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) { + udelay(20); + val = jread32(jme, JME_SMI); + if ((val & SMI_OP_REQ) == 0) + break; + } + + if (i == 0) { + pr_err("phy(%d) read timeout : %d\n", phy, reg); + return 0; + } + + if (again--) + goto read_again; + + return (val & SMI_DATA_MASK) >> SMI_DATA_SHIFT; +} + +static void +jme_mdio_write(struct net_device *netdev, + int phy, int reg, int val) +{ + struct jme_adapter *jme = netdev_priv(netdev); + int i; + + jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ | + ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) | + smi_phy_addr(phy) | smi_reg_addr(reg)); + + wmb(); + for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) { + udelay(20); + if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0) + break; + } + + if (i == 0) + pr_err("phy(%d) write timeout : %d\n", phy, reg); +} + +static inline void +jme_reset_phy_processor(struct jme_adapter *jme) +{ + u32 val; + + jme_mdio_write(jme->dev, + jme->mii_if.phy_id, + MII_ADVERTISE, ADVERTISE_ALL | + ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); + + if (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) + jme_mdio_write(jme->dev, + jme->mii_if.phy_id, + MII_CTRL1000, + ADVERTISE_1000FULL | ADVERTISE_1000HALF); + + val = jme_mdio_read(jme->dev, + jme->mii_if.phy_id, + MII_BMCR); + + jme_mdio_write(jme->dev, + jme->mii_if.phy_id, + MII_BMCR, val | BMCR_RESET); +} + +static void +jme_setup_wakeup_frame(struct jme_adapter *jme, + const u32 *mask, u32 crc, int fnr) +{ + int i; + + /* + * Setup CRC pattern + */ + jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL)); + wmb(); + jwrite32(jme, JME_WFODP, crc); + wmb(); + + /* + * Setup Mask + */ + for (i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) { + jwrite32(jme, JME_WFOI, + ((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) | + (fnr & WFOI_FRAME_SEL)); + wmb(); + jwrite32(jme, JME_WFODP, mask[i]); + wmb(); + } +} + +static inline void +jme_mac_rxclk_off(struct jme_adapter *jme) +{ + jme->reg_gpreg1 |= GPREG1_RXCLKOFF; + jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1); +} + +static inline void +jme_mac_rxclk_on(struct jme_adapter *jme) +{ + jme->reg_gpreg1 &= ~GPREG1_RXCLKOFF; + jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1); +} + +static inline void +jme_mac_txclk_off(struct jme_adapter *jme) +{ + jme->reg_ghc &= ~(GHC_TO_CLK_SRC | GHC_TXMAC_CLK_SRC); + jwrite32f(jme, JME_GHC, jme->reg_ghc); +} + +static inline void +jme_mac_txclk_on(struct jme_adapter *jme) +{ + u32 speed = jme->reg_ghc & GHC_SPEED; + if (speed == GHC_SPEED_1000M) + jme->reg_ghc |= GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY; + else + jme->reg_ghc |= GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE; + jwrite32f(jme, JME_GHC, jme->reg_ghc); +} + +static inline void +jme_reset_ghc_speed(struct jme_adapter *jme) +{ + jme->reg_ghc &= ~(GHC_SPEED | GHC_DPX); + jwrite32f(jme, JME_GHC, jme->reg_ghc); +} + +static inline void +jme_reset_250A2_workaround(struct jme_adapter *jme) +{ + jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH | + GPREG1_RSSPATCH); + jwrite32(jme, JME_GPREG1, jme->reg_gpreg1); +} + +static inline void +jme_assert_ghc_reset(struct jme_adapter *jme) +{ + jme->reg_ghc |= GHC_SWRST; + jwrite32f(jme, JME_GHC, jme->reg_ghc); +} + +static inline void +jme_clear_ghc_reset(struct jme_adapter *jme) +{ + jme->reg_ghc &= ~GHC_SWRST; + jwrite32f(jme, JME_GHC, jme->reg_ghc); +} + +static inline void +jme_reset_mac_processor(struct jme_adapter *jme) +{ + static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0}; + u32 crc = 
0xCDCDCDCD; + u32 gpreg0; + int i; + + jme_reset_ghc_speed(jme); + jme_reset_250A2_workaround(jme); + + jme_mac_rxclk_on(jme); + jme_mac_txclk_on(jme); + udelay(1); + jme_assert_ghc_reset(jme); + udelay(1); + jme_mac_rxclk_off(jme); + jme_mac_txclk_off(jme); + udelay(1); + jme_clear_ghc_reset(jme); + udelay(1); + jme_mac_rxclk_on(jme); + jme_mac_txclk_on(jme); + udelay(1); + jme_mac_rxclk_off(jme); + jme_mac_txclk_off(jme); + + jwrite32(jme, JME_RXDBA_LO, 0x00000000); + jwrite32(jme, JME_RXDBA_HI, 0x00000000); + jwrite32(jme, JME_RXQDC, 0x00000000); + jwrite32(jme, JME_RXNDA, 0x00000000); + jwrite32(jme, JME_TXDBA_LO, 0x00000000); + jwrite32(jme, JME_TXDBA_HI, 0x00000000); + jwrite32(jme, JME_TXQDC, 0x00000000); + jwrite32(jme, JME_TXNDA, 0x00000000); + + jwrite32(jme, JME_RXMCHT_LO, 0x00000000); + jwrite32(jme, JME_RXMCHT_HI, 0x00000000); + for (i = 0 ; i < WAKEUP_FRAME_NR ; ++i) + jme_setup_wakeup_frame(jme, mask, crc, i); + if (jme->fpgaver) + gpreg0 = GPREG0_DEFAULT | GPREG0_LNKINTPOLL; + else + gpreg0 = GPREG0_DEFAULT; + jwrite32(jme, JME_GPREG0, gpreg0); +} + +static inline void +jme_clear_pm(struct jme_adapter *jme) +{ + jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs); +} + +static int +jme_reload_eeprom(struct jme_adapter *jme) +{ + u32 val; + int i; + + val = jread32(jme, JME_SMBCSR); + + if (val & SMBCSR_EEPROMD) { + val |= SMBCSR_CNACK; + jwrite32(jme, JME_SMBCSR, val); + val |= SMBCSR_RELOAD; + jwrite32(jme, JME_SMBCSR, val); + mdelay(12); + + for (i = JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i) { + mdelay(1); + if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0) + break; + } + + if (i == 0) { + pr_err("eeprom reload timeout\n"); + return -EIO; + } + } + + return 0; +} + +static void +jme_load_macaddr(struct net_device *netdev) +{ + struct jme_adapter *jme = netdev_priv(netdev); + unsigned char macaddr[6]; + u32 val; + + spin_lock_bh(&jme->macaddr_lock); + val = jread32(jme, JME_RXUMA_LO); + macaddr[0] = (val >> 0) & 0xFF; + macaddr[1] = (val >> 8) & 0xFF; + macaddr[2] = (val >> 16) & 0xFF; + macaddr[3] = (val >> 24) & 0xFF; + val = jread32(jme, JME_RXUMA_HI); + macaddr[4] = (val >> 0) & 0xFF; + macaddr[5] = (val >> 8) & 0xFF; + memcpy(netdev->dev_addr, macaddr, 6); + spin_unlock_bh(&jme->macaddr_lock); +} + +static inline void +jme_set_rx_pcc(struct jme_adapter *jme, int p) +{ + switch (p) { + case PCC_OFF: + jwrite32(jme, JME_PCCRX0, + ((PCC_OFF_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | + ((PCC_OFF_CNT << PCCRX_SHIFT) & PCCRX_MASK)); + break; + case PCC_P1: + jwrite32(jme, JME_PCCRX0, + ((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | + ((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK)); + break; + case PCC_P2: + jwrite32(jme, JME_PCCRX0, + ((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | + ((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK)); + break; + case PCC_P3: + jwrite32(jme, JME_PCCRX0, + ((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) | + ((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK)); + break; + default: + break; + } + wmb(); + + if (!(test_bit(JME_FLAG_POLL, &jme->flags))) + netif_info(jme, rx_status, jme->dev, "Switched to PCC_P%d\n", p); +} + +static void +jme_start_irq(struct jme_adapter *jme) +{ + register struct dynpcc_info *dpi = &(jme->dpi); + + jme_set_rx_pcc(jme, PCC_P1); + dpi->cur = PCC_P1; + dpi->attempt = PCC_P1; + dpi->cnt = 0; + + jwrite32(jme, JME_PCCTX, + ((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) | + ((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) | + PCCTXQ0_EN + ); + + /* + * Enable Interrupts + */ + jwrite32(jme, JME_IENS, INTR_ENABLE); +} + +static inline void 
+jme_stop_irq(struct jme_adapter *jme) +{ + /* + * Disable Interrupts + */ + jwrite32f(jme, JME_IENC, INTR_ENABLE); +} + +static u32 +jme_linkstat_from_phy(struct jme_adapter *jme) +{ + u32 phylink, bmsr; + + phylink = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 17); + bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR); + if (bmsr & BMSR_ANCOMP) + phylink |= PHY_LINK_AUTONEG_COMPLETE; + + return phylink; +} + +static inline void +jme_set_phyfifo_5level(struct jme_adapter *jme) +{ + jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004); +} + +static inline void +jme_set_phyfifo_8level(struct jme_adapter *jme) +{ + jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000); +} + +static int +jme_check_link(struct net_device *netdev, int testonly) +{ + struct jme_adapter *jme = netdev_priv(netdev); + u32 phylink, cnt = JME_SPDRSV_TIMEOUT, bmcr; + char linkmsg[64]; + int rc = 0; + + linkmsg[0] = '\0'; + + if (jme->fpgaver) + phylink = jme_linkstat_from_phy(jme); + else + phylink = jread32(jme, JME_PHY_LINK); + + if (phylink & PHY_LINK_UP) { + if (!(phylink & PHY_LINK_AUTONEG_COMPLETE)) { + /* + * If we did not enable AN + * Speed/Duplex Info should be obtained from SMI + */ + phylink = PHY_LINK_UP; + + bmcr = jme_mdio_read(jme->dev, + jme->mii_if.phy_id, + MII_BMCR); + + phylink |= ((bmcr & BMCR_SPEED1000) && + (bmcr & BMCR_SPEED100) == 0) ? + PHY_LINK_SPEED_1000M : + (bmcr & BMCR_SPEED100) ? + PHY_LINK_SPEED_100M : + PHY_LINK_SPEED_10M; + + phylink |= (bmcr & BMCR_FULLDPLX) ? + PHY_LINK_DUPLEX : 0; + + strcat(linkmsg, "Forced: "); + } else { + /* + * Keep polling for speed/duplex resolve complete + */ + while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) && + --cnt) { + + udelay(1); + + if (jme->fpgaver) + phylink = jme_linkstat_from_phy(jme); + else + phylink = jread32(jme, JME_PHY_LINK); + } + if (!cnt) + pr_err("Waiting speed resolve timeout\n"); + + strcat(linkmsg, "ANed: "); + } + + if (jme->phylink == phylink) { + rc = 1; + goto out; + } + if (testonly) + goto out; + + jme->phylink = phylink; + + /* + * The speed/duplex setting of jme->reg_ghc already cleared + * by jme_reset_mac_processor() + */ + switch (phylink & PHY_LINK_SPEED_MASK) { + case PHY_LINK_SPEED_10M: + jme->reg_ghc |= GHC_SPEED_10M; + strcat(linkmsg, "10 Mbps, "); + break; + case PHY_LINK_SPEED_100M: + jme->reg_ghc |= GHC_SPEED_100M; + strcat(linkmsg, "100 Mbps, "); + break; + case PHY_LINK_SPEED_1000M: + jme->reg_ghc |= GHC_SPEED_1000M; + strcat(linkmsg, "1000 Mbps, "); + break; + default: + break; + } + + if (phylink & PHY_LINK_DUPLEX) { + jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT); + jwrite32(jme, JME_TXTRHD, TXTRHD_FULLDUPLEX); + jme->reg_ghc |= GHC_DPX; + } else { + jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT | + TXMCS_BACKOFF | + TXMCS_CARRIERSENSE | + TXMCS_COLLISION); + jwrite32(jme, JME_TXTRHD, TXTRHD_HALFDUPLEX); + } + + jwrite32(jme, JME_GHC, jme->reg_ghc); + + if (is_buggy250(jme->pdev->device, jme->chiprev)) { + jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH | + GPREG1_RSSPATCH); + if (!(phylink & PHY_LINK_DUPLEX)) + jme->reg_gpreg1 |= GPREG1_HALFMODEPATCH; + switch (phylink & PHY_LINK_SPEED_MASK) { + case PHY_LINK_SPEED_10M: + jme_set_phyfifo_8level(jme); + jme->reg_gpreg1 |= GPREG1_RSSPATCH; + break; + case PHY_LINK_SPEED_100M: + jme_set_phyfifo_5level(jme); + jme->reg_gpreg1 |= GPREG1_RSSPATCH; + break; + case PHY_LINK_SPEED_1000M: + jme_set_phyfifo_8level(jme); + break; + default: + break; + } + } + jwrite32(jme, JME_GPREG1, jme->reg_gpreg1); + + strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ? 
+ "Full-Duplex, " : + "Half-Duplex, "); + strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ? + "MDI-X" : + "MDI"); + netif_info(jme, link, jme->dev, "Link is up at %s\n", linkmsg); + netif_carrier_on(netdev); + } else { + if (testonly) + goto out; + + netif_info(jme, link, jme->dev, "Link is down\n"); + jme->phylink = 0; + netif_carrier_off(netdev); + } + +out: + return rc; +} + +static int +jme_setup_tx_resources(struct jme_adapter *jme) +{ + struct jme_ring *txring = &(jme->txring[0]); + + txring->alloc = dma_alloc_coherent(&(jme->pdev->dev), + TX_RING_ALLOC_SIZE(jme->tx_ring_size), + &(txring->dmaalloc), + GFP_ATOMIC); + + if (!txring->alloc) + goto err_set_null; + + /* + * 16 Bytes align + */ + txring->desc = (void *)ALIGN((unsigned long)(txring->alloc), + RING_DESC_ALIGN); + txring->dma = ALIGN(txring->dmaalloc, RING_DESC_ALIGN); + txring->next_to_use = 0; + atomic_set(&txring->next_to_clean, 0); + atomic_set(&txring->nr_free, jme->tx_ring_size); + + txring->bufinf = kmalloc(sizeof(struct jme_buffer_info) * + jme->tx_ring_size, GFP_ATOMIC); + if (unlikely(!(txring->bufinf))) + goto err_free_txring; + + /* + * Initialize Transmit Descriptors + */ + memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size)); + memset(txring->bufinf, 0, + sizeof(struct jme_buffer_info) * jme->tx_ring_size); + + return 0; + +err_free_txring: + dma_free_coherent(&(jme->pdev->dev), + TX_RING_ALLOC_SIZE(jme->tx_ring_size), + txring->alloc, + txring->dmaalloc); + +err_set_null: + txring->desc = NULL; + txring->dmaalloc = 0; + txring->dma = 0; + txring->bufinf = NULL; + + return -ENOMEM; +} + +static void +jme_free_tx_resources(struct jme_adapter *jme) +{ + int i; + struct jme_ring *txring = &(jme->txring[0]); + struct jme_buffer_info *txbi; + + if (txring->alloc) { + if (txring->bufinf) { + for (i = 0 ; i < jme->tx_ring_size ; ++i) { + txbi = txring->bufinf + i; + if (txbi->skb) { + dev_kfree_skb(txbi->skb); + txbi->skb = NULL; + } + txbi->mapping = 0; + txbi->len = 0; + txbi->nr_desc = 0; + txbi->start_xmit = 0; + } + kfree(txring->bufinf); + } + + dma_free_coherent(&(jme->pdev->dev), + TX_RING_ALLOC_SIZE(jme->tx_ring_size), + txring->alloc, + txring->dmaalloc); + + txring->alloc = NULL; + txring->desc = NULL; + txring->dmaalloc = 0; + txring->dma = 0; + txring->bufinf = NULL; + } + txring->next_to_use = 0; + atomic_set(&txring->next_to_clean, 0); + atomic_set(&txring->nr_free, 0); +} + +static inline void +jme_enable_tx_engine(struct jme_adapter *jme) +{ + /* + * Select Queue 0 + */ + jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0); + wmb(); + + /* + * Setup TX Queue 0 DMA Bass Address + */ + jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL); + jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32); + jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL); + + /* + * Setup TX Descptor Count + */ + jwrite32(jme, JME_TXQDC, jme->tx_ring_size); + + /* + * Enable TX Engine + */ + wmb(); + jwrite32f(jme, JME_TXCS, jme->reg_txcs | + TXCS_SELECT_QUEUE0 | + TXCS_ENABLE); + + /* + * Start clock for TX MAC Processor + */ + jme_mac_txclk_on(jme); +} + +static inline void +jme_restart_tx_engine(struct jme_adapter *jme) +{ + /* + * Restart TX Engine + */ + jwrite32(jme, JME_TXCS, jme->reg_txcs | + TXCS_SELECT_QUEUE0 | + TXCS_ENABLE); +} + +static inline void +jme_disable_tx_engine(struct jme_adapter *jme) +{ + int i; + u32 val; + + /* + * Disable TX Engine + */ + jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0); + wmb(); + + val = jread32(jme, JME_TXCS); + 
+
+static inline void
+jme_disable_tx_engine(struct jme_adapter *jme)
+{
+	int i;
+	u32 val;
+
+	/*
+	 * Disable TX Engine
+	 */
+	jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);
+	wmb();
+
+	val = jread32(jme, JME_TXCS);
+	for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) {
+		mdelay(1);
+		val = jread32(jme, JME_TXCS);
+		rmb();
+	}
+
+	if (!i)
+		pr_err("Disable TX engine timeout\n");
+
+	/*
+	 * Stop clock for TX MAC Processor
+	 */
+	jme_mac_txclk_off(jme);
+}
+
+static void
+jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
+{
+	struct jme_ring *rxring = &(jme->rxring[0]);
+	register struct rxdesc *rxdesc = rxring->desc;
+	struct jme_buffer_info *rxbi = rxring->bufinf;
+	rxdesc += i;
+	rxbi += i;
+
+	rxdesc->dw[0] = 0;
+	rxdesc->dw[1] = 0;
+	rxdesc->desc1.bufaddrh = cpu_to_le32((__u64)rxbi->mapping >> 32);
+	rxdesc->desc1.bufaddrl = cpu_to_le32(
+					(__u64)rxbi->mapping & 0xFFFFFFFFUL);
+	rxdesc->desc1.datalen = cpu_to_le16(rxbi->len);
+	if (jme->dev->features & NETIF_F_HIGHDMA)
+		rxdesc->desc1.flags = RXFLAG_64BIT;
+	wmb();
+	rxdesc->desc1.flags |= RXFLAG_OWN | RXFLAG_INT;
+}
+
+static int
+jme_make_new_rx_buf(struct jme_adapter *jme, int i)
+{
+	struct jme_ring *rxring = &(jme->rxring[0]);
+	struct jme_buffer_info *rxbi = rxring->bufinf + i;
+	struct sk_buff *skb;
+	dma_addr_t mapping;
+
+	skb = netdev_alloc_skb(jme->dev,
+		jme->dev->mtu + RX_EXTRA_LEN);
+	if (unlikely(!skb))
+		return -ENOMEM;
+
+	mapping = pci_map_page(jme->pdev, virt_to_page(skb->data),
+			       offset_in_page(skb->data), skb_tailroom(skb),
+			       PCI_DMA_FROMDEVICE);
+	if (unlikely(pci_dma_mapping_error(jme->pdev, mapping))) {
+		dev_kfree_skb(skb);
+		return -ENOMEM;
+	}
+
+	if (likely(rxbi->mapping))
+		pci_unmap_page(jme->pdev, rxbi->mapping,
+			       rxbi->len, PCI_DMA_FROMDEVICE);
+
+	rxbi->skb = skb;
+	rxbi->len = skb_tailroom(skb);
+	rxbi->mapping = mapping;
+	return 0;
+}
+
+static void
+jme_free_rx_buf(struct jme_adapter *jme, int i)
+{
+	struct jme_ring *rxring = &(jme->rxring[0]);
+	struct jme_buffer_info *rxbi = rxring->bufinf;
+	rxbi += i;
+
+	if (rxbi->skb) {
+		pci_unmap_page(jme->pdev,
+			       rxbi->mapping,
+			       rxbi->len,
+			       PCI_DMA_FROMDEVICE);
+		dev_kfree_skb(rxbi->skb);
+		rxbi->skb = NULL;
+		rxbi->mapping = 0;
+		rxbi->len = 0;
+	}
+}
+
+static void
+jme_free_rx_resources(struct jme_adapter *jme)
+{
+	int i;
+	struct jme_ring *rxring = &(jme->rxring[0]);
+
+	if (rxring->alloc) {
+		if (rxring->bufinf) {
+			for (i = 0 ; i < jme->rx_ring_size ; ++i)
+				jme_free_rx_buf(jme, i);
+			kfree(rxring->bufinf);
+		}
+
+		dma_free_coherent(&(jme->pdev->dev),
+				  RX_RING_ALLOC_SIZE(jme->rx_ring_size),
+				  rxring->alloc,
+				  rxring->dmaalloc);
+		rxring->alloc = NULL;
+		rxring->desc = NULL;
+		rxring->dmaalloc = 0;
+		rxring->dma = 0;
+		rxring->bufinf = NULL;
+	}
+	rxring->next_to_use = 0;
+	atomic_set(&rxring->next_to_clean, 0);
+}
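+
+/*
+ * (Editor's note: the fragment below is an illustrative sketch, not
+ * driver code.)  Both ring setup routines, jme_setup_tx_resources()
+ * above and jme_setup_rx_resources() below, get 16-byte aligned
+ * descriptors by aligning the coherent allocation after the fact;
+ * the *_RING_ALLOC_SIZE() macros are assumed to reserve the
+ * alignment slack:
+ *
+ *	ring->alloc = dma_alloc_coherent(dev, ALLOC_SIZE(n),
+ *					 &ring->dmaalloc, gfp);
+ *	ring->desc  = (void *)ALIGN((unsigned long)ring->alloc,
+ *				    RING_DESC_ALIGN);
+ *	ring->dma   = ALIGN(ring->dmaalloc, RING_DESC_ALIGN);
+ */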
+
+static int
+jme_setup_rx_resources(struct jme_adapter *jme)
+{
+	int i;
+	struct jme_ring *rxring = &(jme->rxring[0]);
+
+	rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
+					   RX_RING_ALLOC_SIZE(jme->rx_ring_size),
+					   &(rxring->dmaalloc),
+					   GFP_ATOMIC);
+	if (!rxring->alloc)
+		goto err_set_null;
+
+	/*
+	 * 16-byte alignment
+	 */
+	rxring->desc = (void *)ALIGN((unsigned long)(rxring->alloc),
+				     RING_DESC_ALIGN);
+	rxring->dma = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
+	rxring->next_to_use = 0;
+	atomic_set(&rxring->next_to_clean, 0);
+
+	rxring->bufinf = kmalloc(sizeof(struct jme_buffer_info) *
+				 jme->rx_ring_size, GFP_ATOMIC);
+	if (unlikely(!(rxring->bufinf)))
+		goto err_free_rxring;
+
+	/*
+	 * Initialize Receive Descriptors
+	 */
+	memset(rxring->bufinf, 0,
+	       sizeof(struct jme_buffer_info) * jme->rx_ring_size);
+	for (i = 0 ; i < jme->rx_ring_size ; ++i) {
+		if (unlikely(jme_make_new_rx_buf(jme, i))) {
+			jme_free_rx_resources(jme);
+			return -ENOMEM;
+		}
+
+		jme_set_clean_rxdesc(jme, i);
+	}
+
+	return 0;
+
+err_free_rxring:
+	dma_free_coherent(&(jme->pdev->dev),
+			  RX_RING_ALLOC_SIZE(jme->rx_ring_size),
+			  rxring->alloc,
+			  rxring->dmaalloc);
+err_set_null:
+	rxring->desc = NULL;
+	rxring->dmaalloc = 0;
+	rxring->dma = 0;
+	rxring->bufinf = NULL;
+
+	return -ENOMEM;
+}
+
+static inline void
+jme_enable_rx_engine(struct jme_adapter *jme)
+{
+	/*
+	 * Select Queue 0
+	 */
+	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
+				RXCS_QUEUESEL_Q0);
+	wmb();
+
+	/*
+	 * Setup RX DMA Base Address
+	 */
+	jwrite32(jme, JME_RXDBA_LO, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);
+	jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
+	jwrite32(jme, JME_RXNDA, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);
+
+	/*
+	 * Setup RX Descriptor Count
+	 */
+	jwrite32(jme, JME_RXQDC, jme->rx_ring_size);
+
+	/*
+	 * Setup Unicast Filter
+	 */
+	jme_set_unicastaddr(jme->dev);
+	jme_set_multi(jme->dev);
+
+	/*
+	 * Enable RX Engine
+	 */
+	wmb();
+	jwrite32f(jme, JME_RXCS, jme->reg_rxcs |
+				 RXCS_QUEUESEL_Q0 |
+				 RXCS_ENABLE |
+				 RXCS_QST);
+
+	/*
+	 * Start clock for RX MAC Processor
+	 */
+	jme_mac_rxclk_on(jme);
+}
+
+static inline void
+jme_restart_rx_engine(struct jme_adapter *jme)
+{
+	/*
+	 * Start RX Engine
+	 */
+	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
+				RXCS_QUEUESEL_Q0 |
+				RXCS_ENABLE |
+				RXCS_QST);
+}
+
+static inline void
+jme_disable_rx_engine(struct jme_adapter *jme)
+{
+	int i;
+	u32 val;
+
+	/*
+	 * Disable RX Engine
+	 */
+	jwrite32(jme, JME_RXCS, jme->reg_rxcs);
+	wmb();
+
+	val = jread32(jme, JME_RXCS);
+	for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) {
+		mdelay(1);
+		val = jread32(jme, JME_RXCS);
+		rmb();
+	}
+
+	if (!i)
+		pr_err("Disable RX engine timeout\n");
+
+	/*
+	 * Stop clock for RX MAC Processor
+	 */
+	jme_mac_rxclk_off(jme);
+}
+
+static u16
+jme_udpsum(struct sk_buff *skb)
+{
+	u16 csum = 0xFFFFu;
+
+	if (skb->len < (ETH_HLEN + sizeof(struct iphdr)))
+		return csum;
+	if (skb->protocol != htons(ETH_P_IP))
+		return csum;
+	skb_set_network_header(skb, ETH_HLEN);
+	if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
+	    (skb->len < (ETH_HLEN +
+			 (ip_hdr(skb)->ihl << 2) +
+			 sizeof(struct udphdr)))) {
+		skb_reset_network_header(skb);
+		return csum;
+	}
+	skb_set_transport_header(skb,
+				 ETH_HLEN + (ip_hdr(skb)->ihl << 2));
+	csum = udp_hdr(skb)->check;
+	skb_reset_transport_header(skb);
+	skb_reset_network_header(skb);
+
+	return csum;
+}
+
+static int
+jme_rxsum_ok(struct jme_adapter *jme, u16 flags, struct sk_buff *skb)
+{
+	if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
+		return false;
+
+	if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_TCPON | RXWBFLAG_TCPCS))
+			== RXWBFLAG_TCPON)) {
+		if (flags & RXWBFLAG_IPV4)
+			netif_err(jme, rx_err, jme->dev, "TCP Checksum error\n");
+		return false;
+	}
+
+	if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
+			== RXWBFLAG_UDPON) && jme_udpsum(skb)) {
+		if (flags & RXWBFLAG_IPV4)
+			netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n");
+		return false;
+	}
+
+	if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS))
+			== RXWBFLAG_IPV4)) {
+		netif_err(jme, rx_err, jme->dev, "IPv4 Checksum error\n");
+		return false;
+	}
+
+	return true;
+}
+
+static void
+jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
+{
+	struct jme_ring *rxring = &(jme->rxring[0]);
+	struct rxdesc *rxdesc = rxring->desc;
+	struct jme_buffer_info *rxbi = rxring->bufinf;
+	struct sk_buff *skb;
+	int framesize;
+
+	rxdesc += idx;
+	rxbi += idx;
+
+ skb = rxbi->skb; + pci_dma_sync_single_for_cpu(jme->pdev, + rxbi->mapping, + rxbi->len, + PCI_DMA_FROMDEVICE); + + if (unlikely(jme_make_new_rx_buf(jme, idx))) { + pci_dma_sync_single_for_device(jme->pdev, + rxbi->mapping, + rxbi->len, + PCI_DMA_FROMDEVICE); + + ++(NET_STAT(jme).rx_dropped); + } else { + framesize = le16_to_cpu(rxdesc->descwb.framesize) + - RX_PREPAD_SIZE; + + skb_reserve(skb, RX_PREPAD_SIZE); + skb_put(skb, framesize); + skb->protocol = eth_type_trans(skb, jme->dev); + + if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags), skb)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); + + if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) { + u16 vid = le16_to_cpu(rxdesc->descwb.vlan); + + __vlan_hwaccel_put_tag(skb, vid); + NET_STAT(jme).rx_bytes += 4; + } + jme->jme_rx(skb); + + if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_DEST)) == + cpu_to_le16(RXWBFLAG_DEST_MUL)) + ++(NET_STAT(jme).multicast); + + NET_STAT(jme).rx_bytes += framesize; + ++(NET_STAT(jme).rx_packets); + } + + jme_set_clean_rxdesc(jme, idx); + +} + +static int +jme_process_receive(struct jme_adapter *jme, int limit) +{ + struct jme_ring *rxring = &(jme->rxring[0]); + struct rxdesc *rxdesc = rxring->desc; + int i, j, ccnt, desccnt, mask = jme->rx_ring_mask; + + if (unlikely(!atomic_dec_and_test(&jme->rx_cleaning))) + goto out_inc; + + if (unlikely(atomic_read(&jme->link_changing) != 1)) + goto out_inc; + + if (unlikely(!netif_carrier_ok(jme->dev))) + goto out_inc; + + i = atomic_read(&rxring->next_to_clean); + while (limit > 0) { + rxdesc = rxring->desc; + rxdesc += i; + + if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) || + !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL)) + goto out; + --limit; + + rmb(); + desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT; + + if (unlikely(desccnt > 1 || + rxdesc->descwb.errstat & RXWBERR_ALLERR)) { + + if (rxdesc->descwb.errstat & RXWBERR_CRCERR) + ++(NET_STAT(jme).rx_crc_errors); + else if (rxdesc->descwb.errstat & RXWBERR_OVERUN) + ++(NET_STAT(jme).rx_fifo_errors); + else + ++(NET_STAT(jme).rx_errors); + + if (desccnt > 1) + limit -= desccnt - 1; + + for (j = i, ccnt = desccnt ; ccnt-- ; ) { + jme_set_clean_rxdesc(jme, j); + j = (j + 1) & (mask); + } + + } else { + jme_alloc_and_feed_skb(jme, i); + } + + i = (i + desccnt) & (mask); + } + +out: + atomic_set(&rxring->next_to_clean, i); + +out_inc: + atomic_inc(&jme->rx_cleaning); + + return limit > 0 ? 
limit : 0; + +} + +static void +jme_attempt_pcc(struct dynpcc_info *dpi, int atmp) +{ + if (likely(atmp == dpi->cur)) { + dpi->cnt = 0; + return; + } + + if (dpi->attempt == atmp) { + ++(dpi->cnt); + } else { + dpi->attempt = atmp; + dpi->cnt = 0; + } + +} + +static void +jme_dynamic_pcc(struct jme_adapter *jme) +{ + register struct dynpcc_info *dpi = &(jme->dpi); + + if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD) + jme_attempt_pcc(dpi, PCC_P3); + else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD || + dpi->intr_cnt > PCC_INTR_THRESHOLD) + jme_attempt_pcc(dpi, PCC_P2); + else + jme_attempt_pcc(dpi, PCC_P1); + + if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) { + if (dpi->attempt < dpi->cur) + tasklet_schedule(&jme->rxclean_task); + jme_set_rx_pcc(jme, dpi->attempt); + dpi->cur = dpi->attempt; + dpi->cnt = 0; + } +} + +static void +jme_start_pcc_timer(struct jme_adapter *jme) +{ + struct dynpcc_info *dpi = &(jme->dpi); + dpi->last_bytes = NET_STAT(jme).rx_bytes; + dpi->last_pkts = NET_STAT(jme).rx_packets; + dpi->intr_cnt = 0; + jwrite32(jme, JME_TMCSR, + TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT)); +} + +static inline void +jme_stop_pcc_timer(struct jme_adapter *jme) +{ + jwrite32(jme, JME_TMCSR, 0); +} + +static void +jme_shutdown_nic(struct jme_adapter *jme) +{ + u32 phylink; + + phylink = jme_linkstat_from_phy(jme); + + if (!(phylink & PHY_LINK_UP)) { + /* + * Disable all interrupt before issue timer + */ + jme_stop_irq(jme); + jwrite32(jme, JME_TIMER2, TMCSR_EN | 0xFFFFFE); + } +} + +static void +jme_pcc_tasklet(unsigned long arg) +{ + struct jme_adapter *jme = (struct jme_adapter *)arg; + struct net_device *netdev = jme->dev; + + if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) { + jme_shutdown_nic(jme); + return; + } + + if (unlikely(!netif_carrier_ok(netdev) || + (atomic_read(&jme->link_changing) != 1) + )) { + jme_stop_pcc_timer(jme); + return; + } + + if (!(test_bit(JME_FLAG_POLL, &jme->flags))) + jme_dynamic_pcc(jme); + + jme_start_pcc_timer(jme); +} + +static inline void +jme_polling_mode(struct jme_adapter *jme) +{ + jme_set_rx_pcc(jme, PCC_OFF); +} + +static inline void +jme_interrupt_mode(struct jme_adapter *jme) +{ + jme_set_rx_pcc(jme, PCC_P1); +} + +static inline int +jme_pseudo_hotplug_enabled(struct jme_adapter *jme) +{ + u32 apmc; + apmc = jread32(jme, JME_APMC); + return apmc & JME_APMC_PSEUDO_HP_EN; +} + +static void +jme_start_shutdown_timer(struct jme_adapter *jme) +{ + u32 apmc; + + apmc = jread32(jme, JME_APMC) | JME_APMC_PCIE_SD_EN; + apmc &= ~JME_APMC_EPIEN_CTRL; + if (!no_extplug) { + jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_EN); + wmb(); + } + jwrite32f(jme, JME_APMC, apmc); + + jwrite32f(jme, JME_TIMER2, 0); + set_bit(JME_FLAG_SHUTDOWN, &jme->flags); + jwrite32(jme, JME_TMCSR, + TMCSR_EN | ((0xFFFFFF - APMC_PHP_SHUTDOWN_DELAY) & TMCSR_CNT)); +} + +static void +jme_stop_shutdown_timer(struct jme_adapter *jme) +{ + u32 apmc; + + jwrite32f(jme, JME_TMCSR, 0); + jwrite32f(jme, JME_TIMER2, 0); + clear_bit(JME_FLAG_SHUTDOWN, &jme->flags); + + apmc = jread32(jme, JME_APMC); + apmc &= ~(JME_APMC_PCIE_SD_EN | JME_APMC_EPIEN_CTRL); + jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_DIS); + wmb(); + jwrite32f(jme, JME_APMC, apmc); +} + +static void +jme_link_change_tasklet(unsigned long arg) +{ + struct jme_adapter *jme = (struct jme_adapter *)arg; + struct net_device *netdev = jme->dev; + int rc; + + while (!atomic_dec_and_test(&jme->link_changing)) { + atomic_inc(&jme->link_changing); + 
netif_info(jme, intr, jme->dev, "Get link change lock failed\n"); + while (atomic_read(&jme->link_changing) != 1) + netif_info(jme, intr, jme->dev, "Waiting link change lock\n"); + } + + if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu) + goto out; + + jme->old_mtu = netdev->mtu; + netif_stop_queue(netdev); + if (jme_pseudo_hotplug_enabled(jme)) + jme_stop_shutdown_timer(jme); + + jme_stop_pcc_timer(jme); + tasklet_disable(&jme->txclean_task); + tasklet_disable(&jme->rxclean_task); + tasklet_disable(&jme->rxempty_task); + + if (netif_carrier_ok(netdev)) { + jme_disable_rx_engine(jme); + jme_disable_tx_engine(jme); + jme_reset_mac_processor(jme); + jme_free_rx_resources(jme); + jme_free_tx_resources(jme); + + if (test_bit(JME_FLAG_POLL, &jme->flags)) + jme_polling_mode(jme); + + netif_carrier_off(netdev); + } + + jme_check_link(netdev, 0); + if (netif_carrier_ok(netdev)) { + rc = jme_setup_rx_resources(jme); + if (rc) { + pr_err("Allocating resources for RX error, Device STOPPED!\n"); + goto out_enable_tasklet; + } + + rc = jme_setup_tx_resources(jme); + if (rc) { + pr_err("Allocating resources for TX error, Device STOPPED!\n"); + goto err_out_free_rx_resources; + } + + jme_enable_rx_engine(jme); + jme_enable_tx_engine(jme); + + netif_start_queue(netdev); + + if (test_bit(JME_FLAG_POLL, &jme->flags)) + jme_interrupt_mode(jme); + + jme_start_pcc_timer(jme); + } else if (jme_pseudo_hotplug_enabled(jme)) { + jme_start_shutdown_timer(jme); + } + + goto out_enable_tasklet; + +err_out_free_rx_resources: + jme_free_rx_resources(jme); +out_enable_tasklet: + tasklet_enable(&jme->txclean_task); + tasklet_hi_enable(&jme->rxclean_task); + tasklet_hi_enable(&jme->rxempty_task); +out: + atomic_inc(&jme->link_changing); +} + +static void +jme_rx_clean_tasklet(unsigned long arg) +{ + struct jme_adapter *jme = (struct jme_adapter *)arg; + struct dynpcc_info *dpi = &(jme->dpi); + + jme_process_receive(jme, jme->rx_ring_size); + ++(dpi->intr_cnt); + +} + +static int +jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget)) +{ + struct jme_adapter *jme = jme_napi_priv(holder); + int rest; + + rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget)); + + while (atomic_read(&jme->rx_empty) > 0) { + atomic_dec(&jme->rx_empty); + ++(NET_STAT(jme).rx_dropped); + jme_restart_rx_engine(jme); + } + atomic_inc(&jme->rx_empty); + + if (rest) { + JME_RX_COMPLETE(netdev, holder); + jme_interrupt_mode(jme); + } + + JME_NAPI_WEIGHT_SET(budget, rest); + return JME_NAPI_WEIGHT_VAL(budget) - rest; +} + +static void +jme_rx_empty_tasklet(unsigned long arg) +{ + struct jme_adapter *jme = (struct jme_adapter *)arg; + + if (unlikely(atomic_read(&jme->link_changing) != 1)) + return; + + if (unlikely(!netif_carrier_ok(jme->dev))) + return; + + netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n"); + + jme_rx_clean_tasklet(arg); + + while (atomic_read(&jme->rx_empty) > 0) { + atomic_dec(&jme->rx_empty); + ++(NET_STAT(jme).rx_dropped); + jme_restart_rx_engine(jme); + } + atomic_inc(&jme->rx_empty); +} + +static void +jme_wake_queue_if_stopped(struct jme_adapter *jme) +{ + struct jme_ring *txring = &(jme->txring[0]); + + smp_wmb(); + if (unlikely(netif_queue_stopped(jme->dev) && + atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) { + netif_info(jme, tx_done, jme->dev, "TX Queue Waked\n"); + netif_wake_queue(jme->dev); + } + +} + +static void +jme_tx_clean_tasklet(unsigned long arg) +{ + struct jme_adapter *jme = (struct jme_adapter *)arg; + struct jme_ring *txring = &(jme->txring[0]); + struct txdesc 
*txdesc = txring->desc; + struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi; + int i, j, cnt = 0, max, err, mask; + + tx_dbg(jme, "Into txclean\n"); + + if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning))) + goto out; + + if (unlikely(atomic_read(&jme->link_changing) != 1)) + goto out; + + if (unlikely(!netif_carrier_ok(jme->dev))) + goto out; + + max = jme->tx_ring_size - atomic_read(&txring->nr_free); + mask = jme->tx_ring_mask; + + for (i = atomic_read(&txring->next_to_clean) ; cnt < max ; ) { + + ctxbi = txbi + i; + + if (likely(ctxbi->skb && + !(txdesc[i].descwb.flags & TXWBFLAG_OWN))) { + + tx_dbg(jme, "txclean: %d+%d@%lu\n", + i, ctxbi->nr_desc, jiffies); + + err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR; + + for (j = 1 ; j < ctxbi->nr_desc ; ++j) { + ttxbi = txbi + ((i + j) & (mask)); + txdesc[(i + j) & (mask)].dw[0] = 0; + + pci_unmap_page(jme->pdev, + ttxbi->mapping, + ttxbi->len, + PCI_DMA_TODEVICE); + + ttxbi->mapping = 0; + ttxbi->len = 0; + } + + dev_kfree_skb(ctxbi->skb); + + cnt += ctxbi->nr_desc; + + if (unlikely(err)) { + ++(NET_STAT(jme).tx_carrier_errors); + } else { + ++(NET_STAT(jme).tx_packets); + NET_STAT(jme).tx_bytes += ctxbi->len; + } + + ctxbi->skb = NULL; + ctxbi->len = 0; + ctxbi->start_xmit = 0; + + } else { + break; + } + + i = (i + ctxbi->nr_desc) & mask; + + ctxbi->nr_desc = 0; + } + + tx_dbg(jme, "txclean: done %d@%lu\n", i, jiffies); + atomic_set(&txring->next_to_clean, i); + atomic_add(cnt, &txring->nr_free); + + jme_wake_queue_if_stopped(jme); + +out: + atomic_inc(&jme->tx_cleaning); +} + +static void +jme_intr_msi(struct jme_adapter *jme, u32 intrstat) +{ + /* + * Disable interrupt + */ + jwrite32f(jme, JME_IENC, INTR_ENABLE); + + if (intrstat & (INTR_LINKCH | INTR_SWINTR)) { + /* + * Link change event is critical + * all other events are ignored + */ + jwrite32(jme, JME_IEVE, intrstat); + tasklet_schedule(&jme->linkch_task); + goto out_reenable; + } + + if (intrstat & INTR_TMINTR) { + jwrite32(jme, JME_IEVE, INTR_TMINTR); + tasklet_schedule(&jme->pcc_task); + } + + if (intrstat & (INTR_PCCTXTO | INTR_PCCTX)) { + jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | INTR_TX0); + tasklet_schedule(&jme->txclean_task); + } + + if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) { + jwrite32(jme, JME_IEVE, (intrstat & (INTR_PCCRX0TO | + INTR_PCCRX0 | + INTR_RX0EMP)) | + INTR_RX0); + } + + if (test_bit(JME_FLAG_POLL, &jme->flags)) { + if (intrstat & INTR_RX0EMP) + atomic_inc(&jme->rx_empty); + + if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) { + if (likely(JME_RX_SCHEDULE_PREP(jme))) { + jme_polling_mode(jme); + JME_RX_SCHEDULE(jme); + } + } + } else { + if (intrstat & INTR_RX0EMP) { + atomic_inc(&jme->rx_empty); + tasklet_hi_schedule(&jme->rxempty_task); + } else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0)) { + tasklet_hi_schedule(&jme->rxclean_task); + } + } + +out_reenable: + /* + * Re-enable interrupt + */ + jwrite32f(jme, JME_IENS, INTR_ENABLE); +} + +static irqreturn_t +jme_intr(int irq, void *dev_id) +{ + struct net_device *netdev = dev_id; + struct jme_adapter *jme = netdev_priv(netdev); + u32 intrstat; + + intrstat = jread32(jme, JME_IEVE); + + /* + * Check if it's really an interrupt for us + */ + if (unlikely((intrstat & INTR_ENABLE) == 0)) + return IRQ_NONE; + + /* + * Check if the device still exist + */ + if (unlikely(intrstat == ~((typeof(intrstat))0))) + return IRQ_NONE; + + jme_intr_msi(jme, intrstat); + + return IRQ_HANDLED; +} + +static irqreturn_t +jme_msi(int irq, void *dev_id) +{ + struct 
net_device *netdev = dev_id; + struct jme_adapter *jme = netdev_priv(netdev); + u32 intrstat; + + intrstat = jread32(jme, JME_IEVE); + + jme_intr_msi(jme, intrstat); + + return IRQ_HANDLED; +} + +static void +jme_reset_link(struct jme_adapter *jme) +{ + jwrite32(jme, JME_TMCSR, TMCSR_SWIT); +} + +static void +jme_restart_an(struct jme_adapter *jme) +{ + u32 bmcr; + + spin_lock_bh(&jme->phy_lock); + bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); + bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); + jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr); + spin_unlock_bh(&jme->phy_lock); +} + +static int +jme_request_irq(struct jme_adapter *jme) +{ + int rc; + struct net_device *netdev = jme->dev; + irq_handler_t handler = jme_intr; + int irq_flags = IRQF_SHARED; + + if (!pci_enable_msi(jme->pdev)) { + set_bit(JME_FLAG_MSI, &jme->flags); + handler = jme_msi; + irq_flags = 0; + } + + rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name, + netdev); + if (rc) { + netdev_err(netdev, + "Unable to request %s interrupt (return: %d)\n", + test_bit(JME_FLAG_MSI, &jme->flags) ? "MSI" : "INTx", + rc); + + if (test_bit(JME_FLAG_MSI, &jme->flags)) { + pci_disable_msi(jme->pdev); + clear_bit(JME_FLAG_MSI, &jme->flags); + } + } else { + netdev->irq = jme->pdev->irq; + } + + return rc; +} + +static void +jme_free_irq(struct jme_adapter *jme) +{ + free_irq(jme->pdev->irq, jme->dev); + if (test_bit(JME_FLAG_MSI, &jme->flags)) { + pci_disable_msi(jme->pdev); + clear_bit(JME_FLAG_MSI, &jme->flags); + jme->dev->irq = jme->pdev->irq; + } +} + +static inline void +jme_new_phy_on(struct jme_adapter *jme) +{ + u32 reg; + + reg = jread32(jme, JME_PHY_PWR); + reg &= ~(PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW | + PHY_PWR_DWN2 | PHY_PWR_CLKSEL); + jwrite32(jme, JME_PHY_PWR, reg); + + pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, ®); + reg &= ~PE1_GPREG0_PBG; + reg |= PE1_GPREG0_ENBG; + pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg); +} + +static inline void +jme_new_phy_off(struct jme_adapter *jme) +{ + u32 reg; + + reg = jread32(jme, JME_PHY_PWR); + reg |= PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW | + PHY_PWR_DWN2 | PHY_PWR_CLKSEL; + jwrite32(jme, JME_PHY_PWR, reg); + + pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, ®); + reg &= ~PE1_GPREG0_PBG; + reg |= PE1_GPREG0_PDD3COLD; + pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg); +} + +static inline void +jme_phy_on(struct jme_adapter *jme) +{ + u32 bmcr; + + bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); + bmcr &= ~BMCR_PDOWN; + jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr); + + if (new_phy_power_ctrl(jme->chip_main_rev)) + jme_new_phy_on(jme); +} + +static inline void +jme_phy_off(struct jme_adapter *jme) +{ + u32 bmcr; + + bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); + bmcr |= BMCR_PDOWN; + jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr); + + if (new_phy_power_ctrl(jme->chip_main_rev)) + jme_new_phy_off(jme); +} + +static int +jme_open(struct net_device *netdev) +{ + struct jme_adapter *jme = netdev_priv(netdev); + int rc; + + jme_clear_pm(jme); + JME_NAPI_ENABLE(jme); + + tasklet_enable(&jme->linkch_task); + tasklet_enable(&jme->txclean_task); + tasklet_hi_enable(&jme->rxclean_task); + tasklet_hi_enable(&jme->rxempty_task); + + rc = jme_request_irq(jme); + if (rc) + goto err_out; + + jme_start_irq(jme); + + jme_phy_on(jme); + if (test_bit(JME_FLAG_SSET, &jme->flags)) + jme_set_settings(netdev, &jme->old_ecmd); + else + jme_reset_phy_processor(jme); + + jme_reset_link(jme); + + return 0; + 
+err_out: + netif_stop_queue(netdev); + netif_carrier_off(netdev); + return rc; +} + +static void +jme_set_100m_half(struct jme_adapter *jme) +{ + u32 bmcr, tmp; + + jme_phy_on(jme); + bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); + tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | + BMCR_SPEED1000 | BMCR_FULLDPLX); + tmp |= BMCR_SPEED100; + + if (bmcr != tmp) + jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp); + + if (jme->fpgaver) + jwrite32(jme, JME_GHC, GHC_SPEED_100M | GHC_LINK_POLL); + else + jwrite32(jme, JME_GHC, GHC_SPEED_100M); +} + +#define JME_WAIT_LINK_TIME 2000 /* 2000ms */ +static void +jme_wait_link(struct jme_adapter *jme) +{ + u32 phylink, to = JME_WAIT_LINK_TIME; + + mdelay(1000); + phylink = jme_linkstat_from_phy(jme); + while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) { + mdelay(10); + phylink = jme_linkstat_from_phy(jme); + } +} + +static void +jme_powersave_phy(struct jme_adapter *jme) +{ + if (jme->reg_pmcs) { + jme_set_100m_half(jme); + if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN)) + jme_wait_link(jme); + jme_clear_pm(jme); + } else { + jme_phy_off(jme); + } +} + +static int +jme_close(struct net_device *netdev) +{ + struct jme_adapter *jme = netdev_priv(netdev); + + netif_stop_queue(netdev); + netif_carrier_off(netdev); + + jme_stop_irq(jme); + jme_free_irq(jme); + + JME_NAPI_DISABLE(jme); + + tasklet_disable(&jme->linkch_task); + tasklet_disable(&jme->txclean_task); + tasklet_disable(&jme->rxclean_task); + tasklet_disable(&jme->rxempty_task); + + jme_disable_rx_engine(jme); + jme_disable_tx_engine(jme); + jme_reset_mac_processor(jme); + jme_free_rx_resources(jme); + jme_free_tx_resources(jme); + jme->phylink = 0; + jme_phy_off(jme); + + return 0; +} + +static int +jme_alloc_txdesc(struct jme_adapter *jme, + struct sk_buff *skb) +{ + struct jme_ring *txring = &(jme->txring[0]); + int idx, nr_alloc, mask = jme->tx_ring_mask; + + idx = txring->next_to_use; + nr_alloc = skb_shinfo(skb)->nr_frags + 2; + + if (unlikely(atomic_read(&txring->nr_free) < nr_alloc)) + return -1; + + atomic_sub(nr_alloc, &txring->nr_free); + + txring->next_to_use = (txring->next_to_use + nr_alloc) & mask; + + return idx; +} + +static void +jme_fill_tx_map(struct pci_dev *pdev, + struct txdesc *txdesc, + struct jme_buffer_info *txbi, + struct page *page, + u32 page_offset, + u32 len, + u8 hidma) +{ + dma_addr_t dmaaddr; + + dmaaddr = pci_map_page(pdev, + page, + page_offset, + len, + PCI_DMA_TODEVICE); + + pci_dma_sync_single_for_device(pdev, + dmaaddr, + len, + PCI_DMA_TODEVICE); + + txdesc->dw[0] = 0; + txdesc->dw[1] = 0; + txdesc->desc2.flags = TXFLAG_OWN; + txdesc->desc2.flags |= (hidma) ? 
TXFLAG_64BIT : 0;
+	txdesc->desc2.datalen = cpu_to_le16(len);
+	txdesc->desc2.bufaddrh = cpu_to_le32((__u64)dmaaddr >> 32);
+	txdesc->desc2.bufaddrl = cpu_to_le32(
+					(__u64)dmaaddr & 0xFFFFFFFFUL);
+
+	txbi->mapping = dmaaddr;
+	txbi->len = len;
+}
+
+static void
+jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
+{
+	struct jme_ring *txring = &(jme->txring[0]);
+	struct txdesc *txdesc = txring->desc, *ctxdesc;
+	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
+	u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
+	int i, nr_frags = skb_shinfo(skb)->nr_frags;
+	int mask = jme->tx_ring_mask;
+	const struct skb_frag_struct *frag;
+	u32 len;
+
+	for (i = 0 ; i < nr_frags ; ++i) {
+		frag = &skb_shinfo(skb)->frags[i];
+		ctxdesc = txdesc + ((idx + i + 2) & (mask));
+		ctxbi = txbi + ((idx + i + 2) & (mask));
+
+		jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
+				skb_frag_page(frag),
+				frag->page_offset, skb_frag_size(frag), hidma);
+	}
+
+	len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
+	ctxdesc = txdesc + ((idx + 1) & (mask));
+	ctxbi = txbi + ((idx + 1) & (mask));
+	jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
+			offset_in_page(skb->data), len, hidma);
+
+}
+
+static int
+jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
+{
+	if (unlikely(skb_shinfo(skb)->gso_size &&
+		     skb_header_cloned(skb) &&
+		     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) {
+		dev_kfree_skb(skb);
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
+{
+	*mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT);
+	if (*mss) {
+		*flags |= TXFLAG_LSEN;
+
+		if (skb->protocol == htons(ETH_P_IP)) {
+			struct iphdr *iph = ip_hdr(skb);
+
+			iph->check = 0;
+			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+								 iph->daddr, 0,
+								 IPPROTO_TCP,
+								 0);
+		} else {
+			struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+			tcp_hdr(skb)->check = ~csum_ipv6_magic(&ip6h->saddr,
+							       &ip6h->daddr, 0,
+							       IPPROTO_TCP,
+							       0);
+		}
+
+		return 0;
+	}
+
+	return 1;
+}
+
+static void
+jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
+{
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		u8 ip_proto;
+
+		switch (skb->protocol) {
+		case htons(ETH_P_IP):
+			ip_proto = ip_hdr(skb)->protocol;
+			break;
+		case htons(ETH_P_IPV6):
+			ip_proto = ipv6_hdr(skb)->nexthdr;
+			break;
+		default:
+			ip_proto = 0;
+			break;
+		}
+
+		switch (ip_proto) {
+		case IPPROTO_TCP:
+			*flags |= TXFLAG_TCPCS;
+			break;
+		case IPPROTO_UDP:
+			*flags |= TXFLAG_UDPCS;
+			break;
+		default:
+			netif_err(jme, tx_err, jme->dev, "Error upper layer protocol\n");
+			break;
+		}
+	}
+}
+
+static inline void
+jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags)
+{
+	if (vlan_tx_tag_present(skb)) {
+		*flags |= TXFLAG_TAGON;
+		*vlan = cpu_to_le16(vlan_tx_tag_get(skb));
+	}
+}
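+
+/*
+ * (Editor's note: the fragment below is an illustrative sketch, not
+ * driver code.)  jme_fill_tx_desc() follows the usual descriptor
+ * ownership hand-off: every device-visible field and buffer mapping
+ * is written first, and the OWN flag on the head descriptor is
+ * stored last, so the NIC can never start on a half-built chain.
+ * Roughly:
+ *
+ *	...fill pktsize/mss/vlan fields and map the buffers...
+ *	txdesc->desc1.flags = flags;	(flags carries TXFLAG_OWN)
+ */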
+
+static int
+jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
+{
+	struct jme_ring *txring = &(jme->txring[0]);
+	struct txdesc *txdesc;
+	struct jme_buffer_info *txbi;
+	u8 flags;
+
+	txdesc = (struct txdesc *)txring->desc + idx;
+	txbi = txring->bufinf + idx;
+
+	txdesc->dw[0] = 0;
+	txdesc->dw[1] = 0;
+	txdesc->dw[2] = 0;
+	txdesc->dw[3] = 0;
+	txdesc->desc1.pktsize = cpu_to_le16(skb->len);
+	/*
+	 * Set the OWN bit last.
+	 * The kernel may queue packets faster than the NIC can send
+	 * them, and the NIC may try to send this descriptor before we
+	 * tell it to start sending this TX queue.
+	 * All other fields are already filled in correctly.
+	 */
+	wmb();
+	flags = TXFLAG_OWN | TXFLAG_INT;
+	/*
+	 * Set checksum flags when not doing TSO
+	 */
+	if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
+		jme_tx_csum(jme, skb, &flags);
+	jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
+	jme_map_tx_skb(jme, skb, idx);
+	txdesc->desc1.flags = flags;
+	/*
+	 * Set the tx buffer info after telling the NIC to send,
+	 * for better tx_clean timing
+	 */
+	wmb();
+	txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2;
+	txbi->skb = skb;
+	txbi->len = skb->len;
+	txbi->start_xmit = jiffies;
+	if (!txbi->start_xmit)
+		txbi->start_xmit = (0UL-1);
+
+	return 0;
+}
+
+static void
+jme_stop_queue_if_full(struct jme_adapter *jme)
+{
+	struct jme_ring *txring = &(jme->txring[0]);
+	struct jme_buffer_info *txbi = txring->bufinf;
+	int idx = atomic_read(&txring->next_to_clean);
+
+	txbi += idx;
+
+	smp_wmb();
+	if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
+		netif_stop_queue(jme->dev);
+		netif_info(jme, tx_queued, jme->dev, "TX Queue Paused\n");
+		smp_wmb();
+		if (atomic_read(&txring->nr_free)
+			>= (jme->tx_wake_threshold)) {
+			netif_wake_queue(jme->dev);
+			netif_info(jme, tx_queued, jme->dev, "TX Queue Fast Waked\n");
+		}
+	}
+
+	if (unlikely(txbi->start_xmit &&
+			(jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
+			txbi->skb)) {
+		netif_stop_queue(jme->dev);
+		netif_info(jme, tx_queued, jme->dev,
+			   "TX Queue Stopped %d@%lu\n", idx, jiffies);
+	}
+}
+
+/*
+ * This function is already protected by netif_tx_lock()
+ */
+
+static netdev_tx_t
+jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	int idx;
+
+	if (unlikely(jme_expand_header(jme, skb))) {
+		++(NET_STAT(jme).tx_dropped);
+		return NETDEV_TX_OK;
+	}
+
+	idx = jme_alloc_txdesc(jme, skb);
+
+	if (unlikely(idx < 0)) {
+		netif_stop_queue(netdev);
+		netif_err(jme, tx_err, jme->dev,
+			  "BUG! 
Tx ring full when queue awake!\n"); + + return NETDEV_TX_BUSY; + } + + jme_fill_tx_desc(jme, skb, idx); + + jwrite32(jme, JME_TXCS, jme->reg_txcs | + TXCS_SELECT_QUEUE0 | + TXCS_QUEUE0S | + TXCS_ENABLE); + + tx_dbg(jme, "xmit: %d+%d@%lu\n", + idx, skb_shinfo(skb)->nr_frags + 2, jiffies); + jme_stop_queue_if_full(jme); + + return NETDEV_TX_OK; +} + +static void +jme_set_unicastaddr(struct net_device *netdev) +{ + struct jme_adapter *jme = netdev_priv(netdev); + u32 val; + + val = (netdev->dev_addr[3] & 0xff) << 24 | + (netdev->dev_addr[2] & 0xff) << 16 | + (netdev->dev_addr[1] & 0xff) << 8 | + (netdev->dev_addr[0] & 0xff); + jwrite32(jme, JME_RXUMA_LO, val); + val = (netdev->dev_addr[5] & 0xff) << 8 | + (netdev->dev_addr[4] & 0xff); + jwrite32(jme, JME_RXUMA_HI, val); +} + +static int +jme_set_macaddr(struct net_device *netdev, void *p) +{ + struct jme_adapter *jme = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (netif_running(netdev)) + return -EBUSY; + + spin_lock_bh(&jme->macaddr_lock); + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + jme_set_unicastaddr(netdev); + spin_unlock_bh(&jme->macaddr_lock); + + return 0; +} + +static void +jme_set_multi(struct net_device *netdev) +{ + struct jme_adapter *jme = netdev_priv(netdev); + u32 mc_hash[2] = {}; + + spin_lock_bh(&jme->rxmcs_lock); + + jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME; + + if (netdev->flags & IFF_PROMISC) { + jme->reg_rxmcs |= RXMCS_ALLFRAME; + } else if (netdev->flags & IFF_ALLMULTI) { + jme->reg_rxmcs |= RXMCS_ALLMULFRAME; + } else if (netdev->flags & IFF_MULTICAST) { + struct netdev_hw_addr *ha; + int bit_nr; + + jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED; + netdev_for_each_mc_addr(ha, netdev) { + bit_nr = ether_crc(ETH_ALEN, ha->addr) & 0x3F; + mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F); + } + + jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]); + jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]); + } + + wmb(); + jwrite32(jme, JME_RXMCS, jme->reg_rxmcs); + + spin_unlock_bh(&jme->rxmcs_lock); +} + +static int +jme_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct jme_adapter *jme = netdev_priv(netdev); + + if (new_mtu == jme->old_mtu) + return 0; + + if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) || + ((new_mtu) < IPV6_MIN_MTU)) + return -EINVAL; + + if (new_mtu > 4000) { + jme->reg_rxcs &= ~RXCS_FIFOTHNP; + jme->reg_rxcs |= RXCS_FIFOTHNP_64QW; + jme_restart_rx_engine(jme); + } else { + jme->reg_rxcs &= ~RXCS_FIFOTHNP; + jme->reg_rxcs |= RXCS_FIFOTHNP_128QW; + jme_restart_rx_engine(jme); + } + + netdev->mtu = new_mtu; + netdev_update_features(netdev); + + jme_reset_link(jme); + + return 0; +} + +static void +jme_tx_timeout(struct net_device *netdev) +{ + struct jme_adapter *jme = netdev_priv(netdev); + + jme->phylink = 0; + jme_reset_phy_processor(jme); + if (test_bit(JME_FLAG_SSET, &jme->flags)) + jme_set_settings(netdev, &jme->old_ecmd); + + /* + * Force to Reset the link again + */ + jme_reset_link(jme); +} + +static inline void jme_pause_rx(struct jme_adapter *jme) +{ + atomic_dec(&jme->link_changing); + + jme_set_rx_pcc(jme, PCC_OFF); + if (test_bit(JME_FLAG_POLL, &jme->flags)) { + JME_NAPI_DISABLE(jme); + } else { + tasklet_disable(&jme->rxclean_task); + tasklet_disable(&jme->rxempty_task); + } +} + +static inline void jme_resume_rx(struct jme_adapter *jme) +{ + struct dynpcc_info *dpi = &(jme->dpi); + + if (test_bit(JME_FLAG_POLL, &jme->flags)) { + JME_NAPI_ENABLE(jme); + } else { + tasklet_hi_enable(&jme->rxclean_task); + tasklet_hi_enable(&jme->rxempty_task); + } 
+ dpi->cur = PCC_P1; + dpi->attempt = PCC_P1; + dpi->cnt = 0; + jme_set_rx_pcc(jme, PCC_P1); + + atomic_inc(&jme->link_changing); +} + +static void +jme_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + struct jme_adapter *jme = netdev_priv(netdev); + + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->bus_info, pci_name(jme->pdev)); +} + +static int +jme_get_regs_len(struct net_device *netdev) +{ + return JME_REG_LEN; +} + +static void +mmapio_memcpy(struct jme_adapter *jme, u32 *p, u32 reg, int len) +{ + int i; + + for (i = 0 ; i < len ; i += 4) + p[i >> 2] = jread32(jme, reg + i); +} + +static void +mdio_memcpy(struct jme_adapter *jme, u32 *p, int reg_nr) +{ + int i; + u16 *p16 = (u16 *)p; + + for (i = 0 ; i < reg_nr ; ++i) + p16[i] = jme_mdio_read(jme->dev, jme->mii_if.phy_id, i); +} + +static void +jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) +{ + struct jme_adapter *jme = netdev_priv(netdev); + u32 *p32 = (u32 *)p; + + memset(p, 0xFF, JME_REG_LEN); + + regs->version = 1; + mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN); + + p32 += 0x100 >> 2; + mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN); + + p32 += 0x100 >> 2; + mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN); + + p32 += 0x100 >> 2; + mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN); + + p32 += 0x100 >> 2; + mdio_memcpy(jme, p32, JME_PHY_REG_NR); +} + +static int +jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd) +{ + struct jme_adapter *jme = netdev_priv(netdev); + + ecmd->tx_coalesce_usecs = PCC_TX_TO; + ecmd->tx_max_coalesced_frames = PCC_TX_CNT; + + if (test_bit(JME_FLAG_POLL, &jme->flags)) { + ecmd->use_adaptive_rx_coalesce = false; + ecmd->rx_coalesce_usecs = 0; + ecmd->rx_max_coalesced_frames = 0; + return 0; + } + + ecmd->use_adaptive_rx_coalesce = true; + + switch (jme->dpi.cur) { + case PCC_P1: + ecmd->rx_coalesce_usecs = PCC_P1_TO; + ecmd->rx_max_coalesced_frames = PCC_P1_CNT; + break; + case PCC_P2: + ecmd->rx_coalesce_usecs = PCC_P2_TO; + ecmd->rx_max_coalesced_frames = PCC_P2_CNT; + break; + case PCC_P3: + ecmd->rx_coalesce_usecs = PCC_P3_TO; + ecmd->rx_max_coalesced_frames = PCC_P3_CNT; + break; + default: + break; + } + + return 0; +} + +static int +jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd) +{ + struct jme_adapter *jme = netdev_priv(netdev); + struct dynpcc_info *dpi = &(jme->dpi); + + if (netif_running(netdev)) + return -EBUSY; + + if (ecmd->use_adaptive_rx_coalesce && + test_bit(JME_FLAG_POLL, &jme->flags)) { + clear_bit(JME_FLAG_POLL, &jme->flags); + jme->jme_rx = netif_rx; + dpi->cur = PCC_P1; + dpi->attempt = PCC_P1; + dpi->cnt = 0; + jme_set_rx_pcc(jme, PCC_P1); + jme_interrupt_mode(jme); + } else if (!(ecmd->use_adaptive_rx_coalesce) && + !(test_bit(JME_FLAG_POLL, &jme->flags))) { + set_bit(JME_FLAG_POLL, &jme->flags); + jme->jme_rx = netif_receive_skb; + jme_interrupt_mode(jme); + } + + return 0; +} + +static void +jme_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *ecmd) +{ + struct jme_adapter *jme = netdev_priv(netdev); + u32 val; + + ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0; + ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0; + + spin_lock_bh(&jme->phy_lock); + val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE); + spin_unlock_bh(&jme->phy_lock); + + ecmd->autoneg = + (val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0; +} + +static int +jme_set_pauseparam(struct net_device *netdev, + struct 
ethtool_pauseparam *ecmd) +{ + struct jme_adapter *jme = netdev_priv(netdev); + u32 val; + + if (((jme->reg_txpfc & TXPFC_PF_EN) != 0) ^ + (ecmd->tx_pause != 0)) { + + if (ecmd->tx_pause) + jme->reg_txpfc |= TXPFC_PF_EN; + else + jme->reg_txpfc &= ~TXPFC_PF_EN; + + jwrite32(jme, JME_TXPFC, jme->reg_txpfc); + } + + spin_lock_bh(&jme->rxmcs_lock); + if (((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) ^ + (ecmd->rx_pause != 0)) { + + if (ecmd->rx_pause) + jme->reg_rxmcs |= RXMCS_FLOWCTRL; + else + jme->reg_rxmcs &= ~RXMCS_FLOWCTRL; + + jwrite32(jme, JME_RXMCS, jme->reg_rxmcs); + } + spin_unlock_bh(&jme->rxmcs_lock); + + spin_lock_bh(&jme->phy_lock); + val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE); + if (((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) ^ + (ecmd->autoneg != 0)) { + + if (ecmd->autoneg) + val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); + else + val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); + + jme_mdio_write(jme->dev, jme->mii_if.phy_id, + MII_ADVERTISE, val); + } + spin_unlock_bh(&jme->phy_lock); + + return 0; +} + +static void +jme_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct jme_adapter *jme = netdev_priv(netdev); + + wol->supported = WAKE_MAGIC | WAKE_PHY; + + wol->wolopts = 0; + + if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN)) + wol->wolopts |= WAKE_PHY; + + if (jme->reg_pmcs & PMCS_MFEN) + wol->wolopts |= WAKE_MAGIC; + +} + +static int +jme_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct jme_adapter *jme = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_MAGICSECURE | + WAKE_UCAST | + WAKE_MCAST | + WAKE_BCAST | + WAKE_ARP)) + return -EOPNOTSUPP; + + jme->reg_pmcs = 0; + + if (wol->wolopts & WAKE_PHY) + jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN; + + if (wol->wolopts & WAKE_MAGIC) + jme->reg_pmcs |= PMCS_MFEN; + + jwrite32(jme, JME_PMCS, jme->reg_pmcs); + device_set_wakeup_enable(&jme->pdev->dev, !!(jme->reg_pmcs)); + + return 0; +} + +static int +jme_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct jme_adapter *jme = netdev_priv(netdev); + int rc; + + spin_lock_bh(&jme->phy_lock); + rc = mii_ethtool_gset(&(jme->mii_if), ecmd); + spin_unlock_bh(&jme->phy_lock); + return rc; +} + +static int +jme_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct jme_adapter *jme = netdev_priv(netdev); + int rc, fdc = 0; + + if (ethtool_cmd_speed(ecmd) == SPEED_1000 + && ecmd->autoneg != AUTONEG_ENABLE) + return -EINVAL; + + /* + * Check If user changed duplex only while force_media. + * Hardware would not generate link change interrupt. 
+ */ + if (jme->mii_if.force_media && + ecmd->autoneg != AUTONEG_ENABLE && + (jme->mii_if.full_duplex != ecmd->duplex)) + fdc = 1; + + spin_lock_bh(&jme->phy_lock); + rc = mii_ethtool_sset(&(jme->mii_if), ecmd); + spin_unlock_bh(&jme->phy_lock); + + if (!rc) { + if (fdc) + jme_reset_link(jme); + jme->old_ecmd = *ecmd; + set_bit(JME_FLAG_SSET, &jme->flags); + } + + return rc; +} + +static int +jme_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) +{ + int rc; + struct jme_adapter *jme = netdev_priv(netdev); + struct mii_ioctl_data *mii_data = if_mii(rq); + unsigned int duplex_chg; + + if (cmd == SIOCSMIIREG) { + u16 val = mii_data->val_in; + if (!(val & (BMCR_RESET|BMCR_ANENABLE)) && + (val & BMCR_SPEED1000)) + return -EINVAL; + } + + spin_lock_bh(&jme->phy_lock); + rc = generic_mii_ioctl(&jme->mii_if, mii_data, cmd, &duplex_chg); + spin_unlock_bh(&jme->phy_lock); + + if (!rc && (cmd == SIOCSMIIREG)) { + if (duplex_chg) + jme_reset_link(jme); + jme_get_settings(netdev, &jme->old_ecmd); + set_bit(JME_FLAG_SSET, &jme->flags); + } + + return rc; +} + +static u32 +jme_get_link(struct net_device *netdev) +{ + struct jme_adapter *jme = netdev_priv(netdev); + return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP; +} + +static u32 +jme_get_msglevel(struct net_device *netdev) +{ + struct jme_adapter *jme = netdev_priv(netdev); + return jme->msg_enable; +} + +static void +jme_set_msglevel(struct net_device *netdev, u32 value) +{ + struct jme_adapter *jme = netdev_priv(netdev); + jme->msg_enable = value; +} + +static u32 +jme_fix_features(struct net_device *netdev, u32 features) +{ + if (netdev->mtu > 1900) + features &= ~(NETIF_F_ALL_TSO | NETIF_F_ALL_CSUM); + return features; +} + +static int +jme_set_features(struct net_device *netdev, u32 features) +{ + struct jme_adapter *jme = netdev_priv(netdev); + + spin_lock_bh(&jme->rxmcs_lock); + if (features & NETIF_F_RXCSUM) + jme->reg_rxmcs |= RXMCS_CHECKSUM; + else + jme->reg_rxmcs &= ~RXMCS_CHECKSUM; + jwrite32(jme, JME_RXMCS, jme->reg_rxmcs); + spin_unlock_bh(&jme->rxmcs_lock); + + return 0; +} + +static int +jme_nway_reset(struct net_device *netdev) +{ + struct jme_adapter *jme = netdev_priv(netdev); + jme_restart_an(jme); + return 0; +} + +static u8 +jme_smb_read(struct jme_adapter *jme, unsigned int addr) +{ + u32 val; + int to; + + val = jread32(jme, JME_SMBCSR); + to = JME_SMB_BUSY_TIMEOUT; + while ((val & SMBCSR_BUSY) && --to) { + msleep(1); + val = jread32(jme, JME_SMBCSR); + } + if (!to) { + netif_err(jme, hw, jme->dev, "SMB Bus Busy\n"); + return 0xFF; + } + + jwrite32(jme, JME_SMBINTF, + ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) | + SMBINTF_HWRWN_READ | + SMBINTF_HWCMD); + + val = jread32(jme, JME_SMBINTF); + to = JME_SMB_BUSY_TIMEOUT; + while ((val & SMBINTF_HWCMD) && --to) { + msleep(1); + val = jread32(jme, JME_SMBINTF); + } + if (!to) { + netif_err(jme, hw, jme->dev, "SMB Bus Busy\n"); + return 0xFF; + } + + return (val & SMBINTF_HWDATR) >> SMBINTF_HWDATR_SHIFT; +} + +static void +jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data) +{ + u32 val; + int to; + + val = jread32(jme, JME_SMBCSR); + to = JME_SMB_BUSY_TIMEOUT; + while ((val & SMBCSR_BUSY) && --to) { + msleep(1); + val = jread32(jme, JME_SMBCSR); + } + if (!to) { + netif_err(jme, hw, jme->dev, "SMB Bus Busy\n"); + return; + } + + jwrite32(jme, JME_SMBINTF, + ((data << SMBINTF_HWDATW_SHIFT) & SMBINTF_HWDATW) | + ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) | + SMBINTF_HWRWN_WRITE | + SMBINTF_HWCMD); + + val = jread32(jme, JME_SMBINTF); + to = 
JME_SMB_BUSY_TIMEOUT; + while ((val & SMBINTF_HWCMD) && --to) { + msleep(1); + val = jread32(jme, JME_SMBINTF); + } + if (!to) { + netif_err(jme, hw, jme->dev, "SMB Bus Busy\n"); + return; + } + + mdelay(2); +} + +static int +jme_get_eeprom_len(struct net_device *netdev) +{ + struct jme_adapter *jme = netdev_priv(netdev); + u32 val; + val = jread32(jme, JME_SMBCSR); + return (val & SMBCSR_EEPROMD) ? JME_SMB_LEN : 0; +} + +static int +jme_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct jme_adapter *jme = netdev_priv(netdev); + int i, offset = eeprom->offset, len = eeprom->len; + + /* + * ethtool will check the boundary for us + */ + eeprom->magic = JME_EEPROM_MAGIC; + for (i = 0 ; i < len ; ++i) + data[i] = jme_smb_read(jme, i + offset); + + return 0; +} + +static int +jme_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct jme_adapter *jme = netdev_priv(netdev); + int i, offset = eeprom->offset, len = eeprom->len; + + if (eeprom->magic != JME_EEPROM_MAGIC) + return -EINVAL; + + /* + * ethtool will check the boundary for us + */ + for (i = 0 ; i < len ; ++i) + jme_smb_write(jme, i + offset, data[i]); + + return 0; +} + +static const struct ethtool_ops jme_ethtool_ops = { + .get_drvinfo = jme_get_drvinfo, + .get_regs_len = jme_get_regs_len, + .get_regs = jme_get_regs, + .get_coalesce = jme_get_coalesce, + .set_coalesce = jme_set_coalesce, + .get_pauseparam = jme_get_pauseparam, + .set_pauseparam = jme_set_pauseparam, + .get_wol = jme_get_wol, + .set_wol = jme_set_wol, + .get_settings = jme_get_settings, + .set_settings = jme_set_settings, + .get_link = jme_get_link, + .get_msglevel = jme_get_msglevel, + .set_msglevel = jme_set_msglevel, + .nway_reset = jme_nway_reset, + .get_eeprom_len = jme_get_eeprom_len, + .get_eeprom = jme_get_eeprom, + .set_eeprom = jme_set_eeprom, +}; + +static int +jme_pci_dma64(struct pci_dev *pdev) +{ + if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 && + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) + if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) + return 1; + + if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 && + !pci_set_dma_mask(pdev, DMA_BIT_MASK(40))) + if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40))) + return 1; + + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) + if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) + return 0; + + return -1; +} + +static inline void +jme_phy_init(struct jme_adapter *jme) +{ + u16 reg26; + + reg26 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 26); + jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, reg26 | 0x1000); +} + +static inline void +jme_check_hw_ver(struct jme_adapter *jme) +{ + u32 chipmode; + + chipmode = jread32(jme, JME_CHIPMODE); + + jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT; + jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT; + jme->chip_main_rev = jme->chiprev & 0xF; + jme->chip_sub_rev = (jme->chiprev >> 4) & 0xF; +} + +static const struct net_device_ops jme_netdev_ops = { + .ndo_open = jme_open, + .ndo_stop = jme_close, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = jme_ioctl, + .ndo_start_xmit = jme_start_xmit, + .ndo_set_mac_address = jme_set_macaddr, + .ndo_set_rx_mode = jme_set_multi, + .ndo_change_mtu = jme_change_mtu, + .ndo_tx_timeout = jme_tx_timeout, + .ndo_fix_features = jme_fix_features, + .ndo_set_features = jme_set_features, +}; + +static int __devinit +jme_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int rc = 0, 
using_dac, i; + struct net_device *netdev; + struct jme_adapter *jme; + u16 bmcr, bmsr; + u32 apmc; + + /* + * set up PCI device basics + */ + rc = pci_enable_device(pdev); + if (rc) { + pr_err("Cannot enable PCI device\n"); + goto err_out; + } + + using_dac = jme_pci_dma64(pdev); + if (using_dac < 0) { + pr_err("Cannot set PCI DMA Mask\n"); + rc = -EIO; + goto err_out_disable_pdev; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + pr_err("No PCI resource region found\n"); + rc = -ENOMEM; + goto err_out_disable_pdev; + } + + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) { + pr_err("Cannot obtain PCI resource region\n"); + goto err_out_disable_pdev; + } + + pci_set_master(pdev); + + /* + * alloc and init net device + */ + netdev = alloc_etherdev(sizeof(*jme)); + if (!netdev) { + pr_err("Cannot allocate netdev structure\n"); + rc = -ENOMEM; + goto err_out_release_regions; + } + netdev->netdev_ops = &jme_netdev_ops; + netdev->ethtool_ops = &jme_ethtool_ops; + netdev->watchdog_timeo = TX_TIMEOUT; + netdev->hw_features = NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_SG | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXCSUM; + netdev->features = NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_SG | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_RX; + if (using_dac) + netdev->features |= NETIF_F_HIGHDMA; + + SET_NETDEV_DEV(netdev, &pdev->dev); + pci_set_drvdata(pdev, netdev); + + /* + * init adapter info + */ + jme = netdev_priv(netdev); + jme->pdev = pdev; + jme->dev = netdev; + jme->jme_rx = netif_rx; + jme->old_mtu = netdev->mtu = 1500; + jme->phylink = 0; + jme->tx_ring_size = 1 << 10; + jme->tx_ring_mask = jme->tx_ring_size - 1; + jme->tx_wake_threshold = 1 << 9; + jme->rx_ring_size = 1 << 9; + jme->rx_ring_mask = jme->rx_ring_size - 1; + jme->msg_enable = JME_DEF_MSG_ENABLE; + jme->regs = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!(jme->regs)) { + pr_err("Mapping PCI resource region error\n"); + rc = -ENOMEM; + goto err_out_free_netdev; + } + + if (no_pseudohp) { + apmc = jread32(jme, JME_APMC) & ~JME_APMC_PSEUDO_HP_EN; + jwrite32(jme, JME_APMC, apmc); + } else if (force_pseudohp) { + apmc = jread32(jme, JME_APMC) | JME_APMC_PSEUDO_HP_EN; + jwrite32(jme, JME_APMC, apmc); + } + + NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2) + + spin_lock_init(&jme->phy_lock); + spin_lock_init(&jme->macaddr_lock); + spin_lock_init(&jme->rxmcs_lock); + + atomic_set(&jme->link_changing, 1); + atomic_set(&jme->rx_cleaning, 1); + atomic_set(&jme->tx_cleaning, 1); + atomic_set(&jme->rx_empty, 1); + + tasklet_init(&jme->pcc_task, + jme_pcc_tasklet, + (unsigned long) jme); + tasklet_init(&jme->linkch_task, + jme_link_change_tasklet, + (unsigned long) jme); + tasklet_init(&jme->txclean_task, + jme_tx_clean_tasklet, + (unsigned long) jme); + tasklet_init(&jme->rxclean_task, + jme_rx_clean_tasklet, + (unsigned long) jme); + tasklet_init(&jme->rxempty_task, + jme_rx_empty_tasklet, + (unsigned long) jme); + tasklet_disable_nosync(&jme->linkch_task); + tasklet_disable_nosync(&jme->txclean_task); + tasklet_disable_nosync(&jme->rxclean_task); + tasklet_disable_nosync(&jme->rxempty_task); + jme->dpi.cur = PCC_P1; + + jme->reg_ghc = 0; + jme->reg_rxcs = RXCS_DEFAULT; + jme->reg_rxmcs = RXMCS_DEFAULT; + jme->reg_txpfc = 0; + jme->reg_pmcs = PMCS_MFEN; + jme->reg_gpreg1 = GPREG1_DEFAULT; + + if (jme->reg_rxmcs & RXMCS_CHECKSUM) + netdev->features |= NETIF_F_RXCSUM; + + /* + * Get Max Read Req Size from PCI Config Space + */ 
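+ /* TXCS_DMASIZE below is chosen so TX DMA read requests fit within the MRRS */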
+ pci_read_config_byte(pdev, PCI_DCSR_MRRS, &jme->mrrs); + jme->mrrs &= PCI_DCSR_MRRS_MASK; + switch (jme->mrrs) { + case MRRS_128B: + jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B; + break; + case MRRS_256B: + jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B; + break; + default: + jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B; + break; + } + + /* + * Must check before reset_mac_processor + */ + jme_check_hw_ver(jme); + jme->mii_if.dev = netdev; + if (jme->fpgaver) { + jme->mii_if.phy_id = 0; + for (i = 1 ; i < 32 ; ++i) { + bmcr = jme_mdio_read(netdev, i, MII_BMCR); + bmsr = jme_mdio_read(netdev, i, MII_BMSR); + if (bmcr != 0xFFFFU && (bmcr != 0 || bmsr != 0)) { + jme->mii_if.phy_id = i; + break; + } + } + + if (!jme->mii_if.phy_id) { + rc = -EIO; + pr_err("Can not find phy_id\n"); + goto err_out_unmap; + } + + jme->reg_ghc |= GHC_LINK_POLL; + } else { + jme->mii_if.phy_id = 1; + } + if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) + jme->mii_if.supports_gmii = true; + else + jme->mii_if.supports_gmii = false; + jme->mii_if.phy_id_mask = 0x1F; + jme->mii_if.reg_num_mask = 0x1F; + jme->mii_if.mdio_read = jme_mdio_read; + jme->mii_if.mdio_write = jme_mdio_write; + + jme_clear_pm(jme); + pci_set_power_state(jme->pdev, PCI_D0); + device_set_wakeup_enable(&pdev->dev, true); + + jme_set_phyfifo_5level(jme); + jme->pcirev = pdev->revision; + if (!jme->fpgaver) + jme_phy_init(jme); + jme_phy_off(jme); + + /* + * Reset MAC processor and reload EEPROM for MAC Address + */ + jme_reset_mac_processor(jme); + rc = jme_reload_eeprom(jme); + if (rc) { + pr_err("Reload eeprom for reading MAC Address error\n"); + goto err_out_unmap; + } + jme_load_macaddr(netdev); + + /* + * Tell stack that we are not ready to work until open() + */ + netif_carrier_off(netdev); + + rc = register_netdev(netdev); + if (rc) { + pr_err("Cannot register net device\n"); + goto err_out_unmap; + } + + netif_info(jme, probe, jme->dev, "%s%s chiprev:%x pcirev:%x macaddr:%pM\n", + (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ? + "JMC250 Gigabit Ethernet" : + (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ? + "JMC260 Fast Ethernet" : "Unknown", + (jme->fpgaver != 0) ? " (FPGA)" : "", + (jme->fpgaver != 0) ? 
jme->fpgaver : jme->chiprev, + jme->pcirev, netdev->dev_addr); + + return 0; + +err_out_unmap: + iounmap(jme->regs); +err_out_free_netdev: + pci_set_drvdata(pdev, NULL); + free_netdev(netdev); +err_out_release_regions: + pci_release_regions(pdev); +err_out_disable_pdev: + pci_disable_device(pdev); +err_out: + return rc; +} + +static void __devexit +jme_remove_one(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct jme_adapter *jme = netdev_priv(netdev); + + unregister_netdev(netdev); + iounmap(jme->regs); + pci_set_drvdata(pdev, NULL); + free_netdev(netdev); + pci_release_regions(pdev); + pci_disable_device(pdev); + +} + +static void +jme_shutdown(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct jme_adapter *jme = netdev_priv(netdev); + + jme_powersave_phy(jme); + pci_pme_active(pdev, true); +} + +#ifdef CONFIG_PM_SLEEP +static int +jme_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct jme_adapter *jme = netdev_priv(netdev); + ++ if (!netif_running(netdev)) ++ return 0; ++ + atomic_dec(&jme->link_changing); + + netif_device_detach(netdev); + netif_stop_queue(netdev); + jme_stop_irq(jme); + + tasklet_disable(&jme->txclean_task); + tasklet_disable(&jme->rxclean_task); + tasklet_disable(&jme->rxempty_task); + + if (netif_carrier_ok(netdev)) { + if (test_bit(JME_FLAG_POLL, &jme->flags)) + jme_polling_mode(jme); + + jme_stop_pcc_timer(jme); + jme_disable_rx_engine(jme); + jme_disable_tx_engine(jme); + jme_reset_mac_processor(jme); + jme_free_rx_resources(jme); + jme_free_tx_resources(jme); + netif_carrier_off(netdev); + jme->phylink = 0; + } + + tasklet_enable(&jme->txclean_task); + tasklet_hi_enable(&jme->rxclean_task); + tasklet_hi_enable(&jme->rxempty_task); + + jme_powersave_phy(jme); + + return 0; +} + +static int +jme_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct jme_adapter *jme = netdev_priv(netdev); + ++ if (!netif_running(netdev)) ++ return 0; ++ + jme_clear_pm(jme); + jme_phy_on(jme); + if (test_bit(JME_FLAG_SSET, &jme->flags)) + jme_set_settings(netdev, &jme->old_ecmd); + else + jme_reset_phy_processor(jme); + + jme_start_irq(jme); + netif_device_attach(netdev); + + atomic_inc(&jme->link_changing); + + jme_reset_link(jme); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(jme_pm_ops, jme_suspend, jme_resume); +#define JME_PM_OPS (&jme_pm_ops) + +#else + +#define JME_PM_OPS NULL +#endif + +static DEFINE_PCI_DEVICE_TABLE(jme_pci_tbl) = { + { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) }, + { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) }, + { } +}; + +static struct pci_driver jme_driver = { + .name = DRV_NAME, + .id_table = jme_pci_tbl, + .probe = jme_init_one, + .remove = __devexit_p(jme_remove_one), + .shutdown = jme_shutdown, + .driver.pm = JME_PM_OPS, +}; + +static int __init +jme_init_module(void) +{ + pr_info("JMicron JMC2XX ethernet driver version %s\n", DRV_VERSION); + return pci_register_driver(&jme_driver); +} + +static void __exit +jme_cleanup_module(void) +{ + pci_unregister_driver(&jme_driver); +} + +module_init(jme_init_module); +module_exit(jme_cleanup_module); + +MODULE_AUTHOR("Guo-Fu Tseng "); +MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); +MODULE_DEVICE_TABLE(pci, jme_pci_tbl); + diff --cc drivers/net/ethernet/mellanox/mlx4/en_tx.c index 
75338eb88e88,000000000000..90f2cd24faac mode 100644,000000..100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@@ -1,816 -1,0 +1,816 @@@ +/* + * Copyright (c) 2007 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mlx4_en.h" + +enum { + MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */ + MAX_BF = 256, +}; + +static int inline_thold __read_mostly = MAX_INLINE; + +module_param_named(inline_thold, inline_thold, int, 0444); +MODULE_PARM_DESC(inline_thold, "threshold for using inline data"); + +int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, int qpn, u32 size, + u16 stride) +{ + struct mlx4_en_dev *mdev = priv->mdev; + int tmp; + int err; + + ring->size = size; + ring->size_mask = size - 1; + ring->stride = stride; + + inline_thold = min(inline_thold, MAX_INLINE); + + spin_lock_init(&ring->comp_lock); + + tmp = size * sizeof(struct mlx4_en_tx_info); + ring->tx_info = vmalloc(tmp); + if (!ring->tx_info) { + en_err(priv, "Failed allocating tx_info ring\n"); + return -ENOMEM; + } + en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n", + ring->tx_info, tmp); + + ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL); + if (!ring->bounce_buf) { + en_err(priv, "Failed allocating bounce buffer\n"); + err = -ENOMEM; + goto err_tx; + } + ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE); + + err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size, + 2 * PAGE_SIZE); + if (err) { + en_err(priv, "Failed allocating hwq resources\n"); + goto err_bounce; + } + + err = mlx4_en_map_buffer(&ring->wqres.buf); + if (err) { + en_err(priv, "Failed to map TX buffer\n"); + goto err_hwq_res; + } + + ring->buf = ring->wqres.buf.direct.buf; + + en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d " + "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size, + ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map); + + ring->qpn = qpn; + err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp); + if (err) { + en_err(priv, "Failed allocating qp %d\n", ring->qpn); + goto err_map; + } + 
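+ /* async event callback for this QP; completions are handled via the CQ */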
ring->qp.event = mlx4_en_sqp_event; + + err = mlx4_bf_alloc(mdev->dev, &ring->bf); + if (err) { + en_dbg(DRV, priv, "working without blueflame (%d)", err); + ring->bf.uar = &mdev->priv_uar; + ring->bf.uar->map = mdev->uar_map; + ring->bf_enabled = false; + } else + ring->bf_enabled = true; + + return 0; + +err_map: + mlx4_en_unmap_buffer(&ring->wqres.buf); +err_hwq_res: + mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); +err_bounce: + kfree(ring->bounce_buf); + ring->bounce_buf = NULL; +err_tx: + vfree(ring->tx_info); + ring->tx_info = NULL; + return err; +} + +void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring) +{ + struct mlx4_en_dev *mdev = priv->mdev; + en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn); + + if (ring->bf_enabled) + mlx4_bf_free(mdev->dev, &ring->bf); + mlx4_qp_remove(mdev->dev, &ring->qp); + mlx4_qp_free(mdev->dev, &ring->qp); + mlx4_qp_release_range(mdev->dev, ring->qpn, 1); + mlx4_en_unmap_buffer(&ring->wqres.buf); + mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); + kfree(ring->bounce_buf); + ring->bounce_buf = NULL; + vfree(ring->tx_info); + ring->tx_info = NULL; +} + +int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + int cq) +{ + struct mlx4_en_dev *mdev = priv->mdev; + int err; + + ring->cqn = cq; + ring->prod = 0; + ring->cons = 0xffffffff; + ring->last_nr_txbb = 1; + ring->poll_cnt = 0; + ring->blocked = 0; + memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info)); + memset(ring->buf, 0, ring->buf_size); + + ring->qp_state = MLX4_QP_STATE_RST; - ring->doorbell_qpn = swab32(ring->qp.qpn << 8); ++ ring->doorbell_qpn = ring->qp.qpn << 8; + + mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, + ring->cqn, &ring->context); + if (ring->bf_enabled) + ring->context.usr_page = cpu_to_be32(ring->bf.uar->index); + + err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, + &ring->qp, &ring->qp_state); + + return err; +} + +void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring) +{ + struct mlx4_en_dev *mdev = priv->mdev; + + mlx4_qp_modify(mdev->dev, NULL, ring->qp_state, + MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp); +} + + +static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + int index, u8 owner) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_tx_info *tx_info = &ring->tx_info[index]; + struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE; + struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset; + struct sk_buff *skb = tx_info->skb; + struct skb_frag_struct *frag; + void *end = ring->buf + ring->buf_size; + int frags = skb_shinfo(skb)->nr_frags; + int i; + __be32 *ptr = (__be32 *)tx_desc; + __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT)); + + /* Optimize the common case when there are no wraparounds */ + if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) { + if (!tx_info->inl) { + if (tx_info->linear) { + pci_unmap_single(mdev->pdev, + (dma_addr_t) be64_to_cpu(data->addr), + be32_to_cpu(data->byte_count), + PCI_DMA_TODEVICE); + ++data; + } + + for (i = 0; i < frags; i++) { + frag = &skb_shinfo(skb)->frags[i]; + pci_unmap_page(mdev->pdev, + (dma_addr_t) be64_to_cpu(data[i].addr), + skb_frag_size(frag), PCI_DMA_TODEVICE); + } + } + /* Stamp the freed descriptor */ + for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) { + *ptr = stamp; + ptr += STAMP_DWORDS; 
+ } + + } else { + if (!tx_info->inl) { + if ((void *) data >= end) { + data = ring->buf + ((void *)data - end); + } + + if (tx_info->linear) { + pci_unmap_single(mdev->pdev, + (dma_addr_t) be64_to_cpu(data->addr), + be32_to_cpu(data->byte_count), + PCI_DMA_TODEVICE); + ++data; + } + + for (i = 0; i < frags; i++) { + /* Check for wraparound before unmapping */ + if ((void *) data >= end) + data = ring->buf; + frag = &skb_shinfo(skb)->frags[i]; + pci_unmap_page(mdev->pdev, + (dma_addr_t) be64_to_cpu(data->addr), + skb_frag_size(frag), PCI_DMA_TODEVICE); + ++data; + } + } + /* Stamp the freed descriptor */ + for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) { + *ptr = stamp; + ptr += STAMP_DWORDS; + if ((void *) ptr >= end) { + ptr = ring->buf; + stamp ^= cpu_to_be32(0x80000000); + } + } + + } + dev_kfree_skb_any(skb); + return tx_info->nr_txbb; +} + + +int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + int cnt = 0; + + /* Skip last polled descriptor */ + ring->cons += ring->last_nr_txbb; + en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n", + ring->cons, ring->prod); + + if ((u32) (ring->prod - ring->cons) > ring->size) { + if (netif_msg_tx_err(priv)) + en_warn(priv, "Tx consumer passed producer!\n"); + return 0; + } + + while (ring->cons != ring->prod) { + ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring, + ring->cons & ring->size_mask, + !!(ring->cons & ring->size)); + ring->cons += ring->last_nr_txbb; + cnt++; + } + + if (cnt) + en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt); + + return cnt; +} + + +static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_cq *mcq = &cq->mcq; + struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring]; + struct mlx4_cqe *cqe = cq->buf; + u16 index; + u16 new_index; + u32 txbbs_skipped = 0; + u32 cq_last_sav; + + /* index always points to the first TXBB of the last polled descriptor */ + index = ring->cons & ring->size_mask; + new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask; + if (index == new_index) + return; + + if (!priv->port_up) + return; + + /* + * We use a two-stage loop: + * - the first samples the HW-updated CQE + * - the second frees TXBBs until the last sample + * This lets us amortize CQE cache misses, while still polling the CQ + * until it is quiescent. + */ + cq_last_sav = mcq->cons_index; + do { + do { + /* Skip over last polled CQE */ + index = (index + ring->last_nr_txbb) & ring->size_mask; + txbbs_skipped += ring->last_nr_txbb; + + /* Poll next CQE */ + ring->last_nr_txbb = mlx4_en_free_tx_desc( + priv, ring, index, + !!((ring->cons + txbbs_skipped) & + ring->size)); + ++mcq->cons_index; + + } while (index != new_index); + + new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask; + } while (index != new_index); + AVG_PERF_COUNTER(priv->pstats.tx_coal_avg, + (u32) (mcq->cons_index - cq_last_sav)); + + /* + * To prevent CQ overflow we first update CQ consumer and only then + * the ring consumer.
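+ * (the wmb() below enforces this write ordering)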
+ */ + mlx4_cq_set_ci(mcq); + wmb(); + ring->cons += txbbs_skipped; + + /* Wake up the Tx queue if this ring stopped it */ + if (unlikely(ring->blocked)) { + if ((u32) (ring->prod - ring->cons) <= + ring->size - HEADROOM - MAX_DESC_TXBBS) { + ring->blocked = 0; + netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring)); + priv->port_stats.wake_queue++; + } + } +} + +void mlx4_en_tx_irq(struct mlx4_cq *mcq) +{ + struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); + struct mlx4_en_priv *priv = netdev_priv(cq->dev); + struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring]; + + if (!spin_trylock(&ring->comp_lock)) + return; + mlx4_en_process_tx_cq(cq->dev, cq); + mod_timer(&cq->timer, jiffies + 1); + spin_unlock(&ring->comp_lock); +} + + +void mlx4_en_poll_tx_cq(unsigned long data) +{ + struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data; + struct mlx4_en_priv *priv = netdev_priv(cq->dev); + struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring]; + u32 inflight; + + INC_PERF_COUNTER(priv->pstats.tx_poll); + + if (!spin_trylock_irq(&ring->comp_lock)) { + mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT); + return; + } + mlx4_en_process_tx_cq(cq->dev, cq); + inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb); + + /* If there are still packets in flight and the timer has not already + * been scheduled by the Tx routine then schedule it here to guarantee + * completion processing of these packets */ + if (inflight && priv->port_up) + mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT); + + spin_unlock_irq(&ring->comp_lock); +} + +static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + u32 index, + unsigned int desc_size) +{ + u32 copy = (ring->size - index) * TXBB_SIZE; + int i; + + for (i = desc_size - copy - 4; i >= 0; i -= 4) { + if ((i & (TXBB_SIZE - 1)) == 0) + wmb(); + + *((u32 *) (ring->buf + i)) = + *((u32 *) (ring->bounce_buf + copy + i)); + } + + for (i = copy - 4; i >= 4 ; i -= 4) { + if ((i & (TXBB_SIZE - 1)) == 0) + wmb(); + + *((u32 *) (ring->buf + index * TXBB_SIZE + i)) = + *((u32 *) (ring->bounce_buf + i)); + } + + /* Return real descriptor location */ + return ring->buf + index * TXBB_SIZE; +} + +static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind) +{ + struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind]; + struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind]; + unsigned long flags; + + /* If we don't have a pending timer, set one up to catch our recent + post in case the interface becomes idle */ + if (!timer_pending(&cq->timer)) + mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT); + + /* Poll the CQ every MLX4_EN_TX_POLL_MODER packets */ + if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0) + if (spin_trylock_irqsave(&ring->comp_lock, flags)) { + mlx4_en_process_tx_cq(priv->dev, cq); + spin_unlock_irqrestore(&ring->comp_lock, flags); + } +} + +static int is_inline(struct sk_buff *skb, void **pfrag) +{ + void *ptr; + + if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) { + if (skb_shinfo(skb)->nr_frags == 1) { + ptr = skb_frag_address_safe(&skb_shinfo(skb)->frags[0]); + if (unlikely(!ptr)) + return 0; + + if (pfrag) + *pfrag = ptr; + + return 1; + } else if (unlikely(skb_shinfo(skb)->nr_frags)) + return 0; + else + return 1; + } + + return 0; +} + +static int inline_size(struct sk_buff *skb) +{ + if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg) + <= MLX4_INLINE_ALIGN) + return ALIGN(skb->len + CTRL_SIZE + + sizeof(struct 
mlx4_wqe_inline_seg), 16); + else + return ALIGN(skb->len + CTRL_SIZE + 2 * + sizeof(struct mlx4_wqe_inline_seg), 16); +} + +static int get_real_size(struct sk_buff *skb, struct net_device *dev, + int *lso_header_size) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + int real_size; + + if (skb_is_gso(skb)) { + *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb); + real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE + + ALIGN(*lso_header_size + 4, DS_SIZE); + if (unlikely(*lso_header_size != skb_headlen(skb))) { + /* We add a segment for the skb linear buffer only if + * it contains data */ + if (*lso_header_size < skb_headlen(skb)) + real_size += DS_SIZE; + else { + if (netif_msg_tx_err(priv)) + en_warn(priv, "Non-linear headers\n"); + return 0; + } + } + } else { + *lso_header_size = 0; + if (!is_inline(skb, NULL)) + real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE; + else + real_size = inline_size(skb); + } + + return real_size; +} + +static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb, + int real_size, u16 *vlan_tag, int tx_ind, void *fragptr) +{ + struct mlx4_wqe_inline_seg *inl = &tx_desc->inl; + int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl; + + if (skb->len <= spc) { + inl->byte_count = cpu_to_be32(1 << 31 | skb->len); + skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb)); + if (skb_shinfo(skb)->nr_frags) + memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr, + skb_frag_size(&skb_shinfo(skb)->frags[0])); + + } else { + inl->byte_count = cpu_to_be32(1 << 31 | spc); + if (skb_headlen(skb) <= spc) { + skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb)); + if (skb_headlen(skb) < spc) { + memcpy(((void *)(inl + 1)) + skb_headlen(skb), + fragptr, spc - skb_headlen(skb)); + fragptr += spc - skb_headlen(skb); + } + inl = (void *) (inl + 1) + spc; + memcpy(((void *)(inl + 1)), fragptr, skb->len - spc); + } else { + skb_copy_from_linear_data(skb, inl + 1, spc); + inl = (void *) (inl + 1) + spc; + skb_copy_from_linear_data_offset(skb, spc, inl + 1, + skb_headlen(skb) - spc); + if (skb_shinfo(skb)->nr_frags) + memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc, + fragptr, skb_frag_size(&skb_shinfo(skb)->frags[0])); + } + + wmb(); + inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc)); + } + tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag); + tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag); + tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f; +} + +u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + u16 vlan_tag = 0; + + /* If we support per priority flow control and the packet contains + * a vlan tag, send the packet to the TX ring assigned to that priority + */ + if (priv->prof->rx_ppp && vlan_tx_tag_present(skb)) { + vlan_tag = vlan_tx_tag_get(skb); + return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13); + } + + return skb_tx_hash(dev, skb); +} + +static void mlx4_bf_copy(unsigned long *dst, unsigned long *src, unsigned bytecnt) +{ + __iowrite64_copy(dst, src, bytecnt / 8); +} + +netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_tx_ring *ring; + struct mlx4_en_cq *cq; + struct mlx4_en_tx_desc *tx_desc; + struct mlx4_wqe_data_seg *data; + struct skb_frag_struct *frag; + struct mlx4_en_tx_info *tx_info; + struct ethhdr *ethh; + u64 mac; + u32 mac_l, mac_h; + int tx_ind = 0; + int nr_txbb; + 
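+ /* desc_size is in bytes, nr_txbb in TXBB units */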
int desc_size; + int real_size; + dma_addr_t dma; + u32 index, bf_index; + __be32 op_own; + u16 vlan_tag = 0; + int i; + int lso_header_size; + void *fragptr; + bool bounce = false; + + if (!priv->port_up) + goto tx_drop; + + real_size = get_real_size(skb, dev, &lso_header_size); + if (unlikely(!real_size)) + goto tx_drop; + + /* Align descriptor to TXBB size */ + desc_size = ALIGN(real_size, TXBB_SIZE); + nr_txbb = desc_size / TXBB_SIZE; + if (unlikely(nr_txbb > MAX_DESC_TXBBS)) { + if (netif_msg_tx_err(priv)) + en_warn(priv, "Oversized header or SG list\n"); + goto tx_drop; + } + + tx_ind = skb->queue_mapping; + ring = &priv->tx_ring[tx_ind]; + if (vlan_tx_tag_present(skb)) + vlan_tag = vlan_tx_tag_get(skb); + + /* Check available TXBBs and 2K spare for prefetch */ + if (unlikely(((int)(ring->prod - ring->cons)) > + ring->size - HEADROOM - MAX_DESC_TXBBS)) { + /* every full Tx ring stops queue */ + netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind)); + ring->blocked = 1; + priv->port_stats.queue_stopped++; + + /* Use interrupts to find out when queue opened */ + cq = &priv->tx_cq[tx_ind]; + mlx4_en_arm_cq(priv, cq); + return NETDEV_TX_BUSY; + } + + /* Track current inflight packets for performance analysis */ + AVG_PERF_COUNTER(priv->pstats.inflight_avg, + (u32) (ring->prod - ring->cons - 1)); + + /* Packet is good - grab an index and transmit it */ + index = ring->prod & ring->size_mask; + bf_index = ring->prod; + + /* See if we have enough space for whole descriptor TXBB for setting + * SW ownership on next descriptor; if not, use a bounce buffer. */ + if (likely(index + nr_txbb <= ring->size)) + tx_desc = ring->buf + index * TXBB_SIZE; + else { + tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf; + bounce = true; + } + + /* Save skb in tx_info ring */ + tx_info = &ring->tx_info[index]; + tx_info->skb = skb; + tx_info->nr_txbb = nr_txbb; + + /* Prepare the ctrl segment apart from opcode+ownership, which depends on + * whether LSO is used */ + tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag); + tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag; + tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f; + tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE | + MLX4_WQE_CTRL_SOLICITED); + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM | + MLX4_WQE_CTRL_TCP_UDP_CSUM); + ring->tx_csum++; + } + + if (unlikely(priv->validate_loopback)) { + /* Copy dst mac address to wqe */ + skb_reset_mac_header(skb); + ethh = eth_hdr(skb); + if (ethh && ethh->h_dest) { + mac = mlx4_en_mac_to_u64(ethh->h_dest); + mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16); + mac_l = (u32) (mac & 0xffffffff); + tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h); + tx_desc->ctrl.imm = cpu_to_be32(mac_l); + } + } + + /* Handle LSO (TSO) packets */ + if (lso_header_size) { + /* Mark opcode as LSO */ + op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) | + ((ring->prod & ring->size) ? 
+ cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0); + + /* Fill in the LSO prefix */ + tx_desc->lso.mss_hdr_size = cpu_to_be32( + skb_shinfo(skb)->gso_size << 16 | lso_header_size); + + /* Copy headers; + * note that we already verified that it is linear */ + memcpy(tx_desc->lso.header, skb->data, lso_header_size); + data = ((void *) &tx_desc->lso + + ALIGN(lso_header_size + 4, DS_SIZE)); + + priv->port_stats.tso_packets++; + i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) + + !!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size); + ring->bytes += skb->len + (i - 1) * lso_header_size; + ring->packets += i; + } else { + /* Normal (Non LSO) packet */ + op_own = cpu_to_be32(MLX4_OPCODE_SEND) | + ((ring->prod & ring->size) ? + cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0); + data = &tx_desc->data; + ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN); + ring->packets++; + + } + AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len); + + + /* valid only for non-inline segments */ + tx_info->data_offset = (void *) data - (void *) tx_desc; + + tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0; + data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1; + + if (!is_inline(skb, &fragptr)) { + /* Map fragments */ + for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) { + frag = &skb_shinfo(skb)->frags[i]; + dma = skb_frag_dma_map(&mdev->dev->pdev->dev, frag, + 0, skb_frag_size(frag), + DMA_TO_DEVICE); + data->addr = cpu_to_be64(dma); + data->lkey = cpu_to_be32(mdev->mr.key); + wmb(); + data->byte_count = cpu_to_be32(skb_frag_size(frag)); + --data; + } + + /* Map linear part */ + if (tx_info->linear) { + dma = pci_map_single(mdev->dev->pdev, skb->data + lso_header_size, + skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE); + data->addr = cpu_to_be64(dma); + data->lkey = cpu_to_be32(mdev->mr.key); + wmb(); + data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size); + } + tx_info->inl = 0; + } else { + build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr); + tx_info->inl = 1; + } + + ring->prod += nr_txbb; + + /* If we used a bounce buffer then copy descriptor back into place */ + if (bounce) + tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size); + + /* Run destructor before passing skb to HW */ + if (likely(!skb_shared(skb))) + skb_orphan(skb); + + if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) { - *(u32 *) (&tx_desc->ctrl.vlan_tag) |= ring->doorbell_qpn; ++ *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn); + op_own |= htonl((bf_index & 0xffff) << 8); + /* Ensure the new descriptor hits memory + * before setting ownership of this descriptor to HW */ + wmb(); + tx_desc->ctrl.owner_opcode = op_own; + + wmb(); + + mlx4_bf_copy(ring->bf.reg + ring->bf.offset, (unsigned long *) &tx_desc->ctrl, + desc_size); + + wmb(); + + ring->bf.offset ^= ring->bf.buf_size; + } else { + /* Ensure the new descriptor hits memory + * before setting ownership of this descriptor to HW */ + wmb(); + tx_desc->ctrl.owner_opcode = op_own; + wmb(); - writel(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL); ++ iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL); + } + + /* Poll CQ here */ + mlx4_en_xmit_poll(priv, tx_ind); + + return NETDEV_TX_OK; + +tx_drop: + dev_kfree_skb_any(skb); + priv->stats.tx_dropped++; + return NETDEV_TX_OK; +} + diff --cc drivers/net/ethernet/realtek/r8169.c index aa39e771175c,000000000000..92b45f08858f mode 100644,000000..100644 --- 
a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@@ -1,6219 -1,0 +1,6243 @@@ +/* + * r8169.c: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define RTL8169_VERSION "2.3LK-NAPI" +#define MODULENAME "r8169" +#define PFX MODULENAME ": " + +#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw" +#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw" +#define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw" +#define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw" +#define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw" +#define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw" +#define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw" +#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw" + +#ifdef RTL8169_DEBUG +#define assert(expr) \ + if (!(expr)) { \ + printk( "Assertion failed! %s,%s,%s,line=%d\n", \ + #expr,__FILE__,__func__,__LINE__); \ + } +#define dprintk(fmt, args...) \ + do { printk(KERN_DEBUG PFX fmt, ## args); } while (0) +#else +#define assert(expr) do {} while (0) +#define dprintk(fmt, args...) do {} while (0) +#endif /* RTL8169_DEBUG */ + +#define R8169_MSG_DEFAULT \ + (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN) + +#define TX_BUFFS_AVAIL(tp) \ + (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1) + +/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). + The RTL chips use a 64 element hash table based on the Ethernet CRC. */ +static const int multicast_filter_limit = 32; + +/* MAC address length */ +#define MAC_ADDR_LEN 6 + +#define MAX_READ_REQUEST_SHIFT 12 +#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ +#define SafeMtu 0x1c20 /* ... 
actually life sucks beyond ~7k */ +#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ + +#define R8169_REGS_SIZE 256 +#define R8169_NAPI_WEIGHT 64 +#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */ +#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */ +#define RX_BUF_SIZE 1536 /* Rx Buffer size */ +#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) +#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) + +#define RTL8169_TX_TIMEOUT (6*HZ) +#define RTL8169_PHY_TIMEOUT (10*HZ) + +#define RTL_EEPROM_SIG cpu_to_le32(0x8129) +#define RTL_EEPROM_SIG_MASK cpu_to_le32(0xffff) +#define RTL_EEPROM_SIG_ADDR 0x0000 + +/* write/read MMIO register */ +#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg)) +#define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg)) +#define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg)) +#define RTL_R8(reg) readb (ioaddr + (reg)) +#define RTL_R16(reg) readw (ioaddr + (reg)) +#define RTL_R32(reg) readl (ioaddr + (reg)) + +enum mac_version { + RTL_GIGA_MAC_VER_01 = 0, + RTL_GIGA_MAC_VER_02, + RTL_GIGA_MAC_VER_03, + RTL_GIGA_MAC_VER_04, + RTL_GIGA_MAC_VER_05, + RTL_GIGA_MAC_VER_06, + RTL_GIGA_MAC_VER_07, + RTL_GIGA_MAC_VER_08, + RTL_GIGA_MAC_VER_09, + RTL_GIGA_MAC_VER_10, + RTL_GIGA_MAC_VER_11, + RTL_GIGA_MAC_VER_12, + RTL_GIGA_MAC_VER_13, + RTL_GIGA_MAC_VER_14, + RTL_GIGA_MAC_VER_15, + RTL_GIGA_MAC_VER_16, + RTL_GIGA_MAC_VER_17, + RTL_GIGA_MAC_VER_18, + RTL_GIGA_MAC_VER_19, + RTL_GIGA_MAC_VER_20, + RTL_GIGA_MAC_VER_21, + RTL_GIGA_MAC_VER_22, + RTL_GIGA_MAC_VER_23, + RTL_GIGA_MAC_VER_24, + RTL_GIGA_MAC_VER_25, + RTL_GIGA_MAC_VER_26, + RTL_GIGA_MAC_VER_27, + RTL_GIGA_MAC_VER_28, + RTL_GIGA_MAC_VER_29, + RTL_GIGA_MAC_VER_30, + RTL_GIGA_MAC_VER_31, + RTL_GIGA_MAC_VER_32, + RTL_GIGA_MAC_VER_33, + RTL_GIGA_MAC_VER_34, + RTL_GIGA_MAC_VER_35, + RTL_GIGA_MAC_VER_36, + RTL_GIGA_MAC_NONE = 0xff, +}; + +enum rtl_tx_desc_version { + RTL_TD_0 = 0, + RTL_TD_1 = 1, +}; + +#define JUMBO_1K ETH_DATA_LEN +#define JUMBO_4K (4*1024 - ETH_HLEN - 2) +#define JUMBO_6K (6*1024 - ETH_HLEN - 2) +#define JUMBO_7K (7*1024 - ETH_HLEN - 2) +#define JUMBO_9K (9*1024 - ETH_HLEN - 2) + +#define _R(NAME,TD,FW,SZ,B) { \ + .name = NAME, \ + .txd_version = TD, \ + .fw_name = FW, \ + .jumbo_max = SZ, \ + .jumbo_tx_csum = B \ +} + +static const struct { + const char *name; + enum rtl_tx_desc_version txd_version; + const char *fw_name; + u16 jumbo_max; + bool jumbo_tx_csum; +} rtl_chip_infos[] = { + /* PCI devices. */ + [RTL_GIGA_MAC_VER_01] = + _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K, true), + [RTL_GIGA_MAC_VER_02] = + _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K, true), + [RTL_GIGA_MAC_VER_03] = + _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K, true), + [RTL_GIGA_MAC_VER_04] = + _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K, true), + [RTL_GIGA_MAC_VER_05] = + _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true), + [RTL_GIGA_MAC_VER_06] = + _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true), + /* PCI-E devices. 
*/ + [RTL_GIGA_MAC_VER_07] = + _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_08] = + _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_09] = + _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_10] = + _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_11] = + _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false), + [RTL_GIGA_MAC_VER_12] = + _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false), + [RTL_GIGA_MAC_VER_13] = + _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_14] = + _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_15] = + _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_16] = + _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_17] = + _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false), + [RTL_GIGA_MAC_VER_18] = + _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_19] = + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_20] = + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_21] = + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_22] = + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_23] = + _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_24] = + _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_25] = + _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_26] = + _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_27] = + _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false), + [RTL_GIGA_MAC_VER_28] = + _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false), + [RTL_GIGA_MAC_VER_29] = + _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, + JUMBO_1K, true), + [RTL_GIGA_MAC_VER_30] = + _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, + JUMBO_1K, true), + [RTL_GIGA_MAC_VER_31] = + _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false), + [RTL_GIGA_MAC_VER_32] = + _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_33] = + _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_34] = + _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_35] = + _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_36] = + _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2, + JUMBO_9K, false), +}; +#undef _R + +enum cfg_version { + RTL_CFG_0 = 0x00, + RTL_CFG_1, + RTL_CFG_2 +}; + +static void rtl_hw_start_8169(struct net_device *); +static void rtl_hw_start_8168(struct net_device *); +static void rtl_hw_start_8101(struct net_device *); + +static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = { + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 }, + { PCI_VENDOR_ID_LINKSYS, 0x1032, + PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 }, + { 0x0001, 0x8168, + PCI_ANY_ID, 0x2410, 
0, 0, RTL_CFG_2 }, + {0,}, +}; + +MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl); + +static int rx_buf_sz = 16383; +static int use_dac; +static struct { + u32 msg_enable; +} debug = { -1 }; + +enum rtl_registers { + MAC0 = 0, /* Ethernet hardware address. */ + MAC4 = 4, + MAR0 = 8, /* Multicast filter. */ + CounterAddrLow = 0x10, + CounterAddrHigh = 0x14, + TxDescStartAddrLow = 0x20, + TxDescStartAddrHigh = 0x24, + TxHDescStartAddrLow = 0x28, + TxHDescStartAddrHigh = 0x2c, + FLASH = 0x30, + ERSR = 0x36, + ChipCmd = 0x37, + TxPoll = 0x38, + IntrMask = 0x3c, + IntrStatus = 0x3e, + + TxConfig = 0x40, +#define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */ +#define TXCFG_EMPTY (1 << 11) /* 8111e-vl */ + + RxConfig = 0x44, +#define RX128_INT_EN (1 << 15) /* 8111c and later */ +#define RX_MULTI_EN (1 << 14) /* 8111c only */ +#define RXCFG_FIFO_SHIFT 13 + /* No threshold before first PCI xfer */ +#define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT) +#define RXCFG_DMA_SHIFT 8 + /* Unlimited maximum PCI burst. */ +#define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT) + + RxMissed = 0x4c, + Cfg9346 = 0x50, + Config0 = 0x51, + Config1 = 0x52, + Config2 = 0x53, + Config3 = 0x54, + Config4 = 0x55, + Config5 = 0x56, + MultiIntr = 0x5c, + PHYAR = 0x60, + PHYstatus = 0x6c, + RxMaxSize = 0xda, + CPlusCmd = 0xe0, + IntrMitigate = 0xe2, + RxDescAddrLow = 0xe4, + RxDescAddrHigh = 0xe8, + EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */ + +#define NoEarlyTx 0x3f /* Max value : no early transmit. */ + + MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */ + +#define TxPacketMax (8064 >> 7) +#define EarlySize 0x27 + + FuncEvent = 0xf0, + FuncEventMask = 0xf4, + FuncPresetState = 0xf8, + FuncForceEvent = 0xfc, +}; + +enum rtl8110_registers { + TBICSR = 0x64, + TBI_ANAR = 0x68, + TBI_LPAR = 0x6a, +}; + +enum rtl8168_8101_registers { + CSIDR = 0x64, + CSIAR = 0x68, +#define CSIAR_FLAG 0x80000000 +#define CSIAR_WRITE_CMD 0x80000000 +#define CSIAR_BYTE_ENABLE 0x0f +#define CSIAR_BYTE_ENABLE_SHIFT 12 +#define CSIAR_ADDR_MASK 0x0fff + PMCH = 0x6f, + EPHYAR = 0x80, +#define EPHYAR_FLAG 0x80000000 +#define EPHYAR_WRITE_CMD 0x80000000 +#define EPHYAR_REG_MASK 0x1f +#define EPHYAR_REG_SHIFT 16 +#define EPHYAR_DATA_MASK 0xffff + DLLPR = 0xd0, +#define PFM_EN (1 << 6) + DBG_REG = 0xd1, +#define FIX_NAK_1 (1 << 4) +#define FIX_NAK_2 (1 << 3) + TWSI = 0xd2, + MCU = 0xd3, +#define NOW_IS_OOB (1 << 7) +#define EN_NDP (1 << 3) +#define EN_OOB_RESET (1 << 2) + EFUSEAR = 0xdc, +#define EFUSEAR_FLAG 0x80000000 +#define EFUSEAR_WRITE_CMD 0x80000000 +#define EFUSEAR_READ_CMD 0x00000000 +#define EFUSEAR_REG_MASK 0x03ff +#define EFUSEAR_REG_SHIFT 8 +#define EFUSEAR_DATA_MASK 0xff +}; + +enum rtl8168_registers { + LED_FREQ = 0x1a, + EEE_LED = 0x1b, + ERIDR = 0x70, + ERIAR = 0x74, +#define ERIAR_FLAG 0x80000000 +#define ERIAR_WRITE_CMD 0x80000000 +#define ERIAR_READ_CMD 0x00000000 +#define ERIAR_ADDR_BYTE_ALIGN 4 +#define ERIAR_TYPE_SHIFT 16 +#define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT) +#define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT) +#define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT) +#define ERIAR_MASK_SHIFT 12 +#define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT) + EPHY_RXER_NUM = 0x7c, + OCPDR = 0xb0, /* OCP GPHY access */ +#define OCPDR_WRITE_CMD 0x80000000 +#define OCPDR_READ_CMD 0x00000000 +#define OCPDR_REG_MASK 0x7f +#define OCPDR_GPHY_REG_SHIFT 16 +#define OCPDR_DATA_MASK 0xffff + OCPAR = 0xb4, +#define OCPAR_FLAG 0x80000000 +#define OCPAR_GPHY_WRITE_CMD 
0x8000f060 +#define OCPAR_GPHY_READ_CMD 0x0000f060 + RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */ + MISC = 0xf0, /* 8168e only. */ +#define TXPLA_RST (1 << 29) +#define PWM_EN (1 << 22) +}; + +enum rtl_register_content { + /* InterruptStatusBits */ + SYSErr = 0x8000, + PCSTimeout = 0x4000, + SWInt = 0x0100, + TxDescUnavail = 0x0080, + RxFIFOOver = 0x0040, + LinkChg = 0x0020, + RxOverflow = 0x0010, + TxErr = 0x0008, + TxOK = 0x0004, + RxErr = 0x0002, + RxOK = 0x0001, + + /* RxStatusDesc */ + RxBOVF = (1 << 24), + RxFOVF = (1 << 23), + RxRWT = (1 << 22), + RxRES = (1 << 21), + RxRUNT = (1 << 20), + RxCRC = (1 << 19), + + /* ChipCmdBits */ + StopReq = 0x80, + CmdReset = 0x10, + CmdRxEnb = 0x08, + CmdTxEnb = 0x04, + RxBufEmpty = 0x01, + + /* TXPoll register p.5 */ + HPQ = 0x80, /* Poll cmd on the high prio queue */ + NPQ = 0x40, /* Poll cmd on the low prio queue */ + FSWInt = 0x01, /* Forced software interrupt */ + + /* Cfg9346Bits */ + Cfg9346_Lock = 0x00, + Cfg9346_Unlock = 0xc0, + + /* rx_mode_bits */ + AcceptErr = 0x20, + AcceptRunt = 0x10, + AcceptBroadcast = 0x08, + AcceptMulticast = 0x04, + AcceptMyPhys = 0x02, + AcceptAllPhys = 0x01, +#define RX_CONFIG_ACCEPT_MASK 0x3f + + /* TxConfigBits */ + TxInterFrameGapShift = 24, + TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ + + /* Config1 register p.24 */ + LEDS1 = (1 << 7), + LEDS0 = (1 << 6), + MSIEnable = (1 << 5), /* Enable Message Signaled Interrupt */ + Speed_down = (1 << 4), + MEMMAP = (1 << 3), + IOMAP = (1 << 2), + VPD = (1 << 1), + PMEnable = (1 << 0), /* Power Management Enable */ + + /* Config2 register p. 25 */ + PCI_Clock_66MHz = 0x01, + PCI_Clock_33MHz = 0x00, + + /* Config3 register p.25 */ + MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ + LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */ + Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */ + Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */ + + /* Config4 register */ + Jumbo_En1 = (1 << 1), /* 8168 only. Reserved in the 8168b */ + + /* Config5 register p.27 */ + BWF = (1 << 6), /* Accept Broadcast wakeup frame */ + MWF = (1 << 5), /* Accept Multicast wakeup frame */ + UWF = (1 << 4), /* Accept Unicast wakeup frame */ + Spi_en = (1 << 3), + LanWake = (1 << 1), /* LanWake enable/disable */ + PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ + + /* TBICSR p.28 */ + TBIReset = 0x80000000, + TBILoopback = 0x40000000, + TBINwEnable = 0x20000000, + TBINwRestart = 0x10000000, + TBILinkOk = 0x02000000, + TBINwComplete = 0x01000000, + + /* CPlusCmd p.31 */ + EnableBist = (1 << 15), // 8168 8101 + Mac_dbgo_oe = (1 << 14), // 8168 8101 + Normal_mode = (1 << 13), // unused + Force_half_dup = (1 << 12), // 8168 8101 + Force_rxflow_en = (1 << 11), // 8168 8101 + Force_txflow_en = (1 << 10), // 8168 8101 + Cxpl_dbg_sel = (1 << 9), // 8168 8101 + ASF = (1 << 8), // 8168 8101 + PktCntrDisable = (1 << 7), // 8168 8101 + Mac_dbgo_sel = 0x001c, // 8168 + RxVlan = (1 << 6), + RxChkSum = (1 << 5), + PCIDAC = (1 << 4), + PCIMulRW = (1 << 3), + INTT_0 = 0x0000, // 8168 + INTT_1 = 0x0001, // 8168 + INTT_2 = 0x0002, // 8168 + INTT_3 = 0x0003, // 8168 + + /* rtl8169_PHYstatus */ + TBI_Enable = 0x80, + TxFlowCtrl = 0x40, + RxFlowCtrl = 0x20, + _1000bpsF = 0x10, + _100bps = 0x08, + _10bps = 0x04, + LinkStatus = 0x02, + FullDup = 0x01, + + /* _TBICSRBit */ + TBILinkOK = 0x02000000, + + /* DumpCounterCommand */ + CounterDump = 0x8, +}; + +enum rtl_desc_bit { + /* First doubleword. 
*/ + DescOwn = (1 << 31), /* Descriptor is owned by NIC */ + RingEnd = (1 << 30), /* End of descriptor ring */ + FirstFrag = (1 << 29), /* First segment of a packet */ + LastFrag = (1 << 28), /* Final segment of a packet */ +}; + +/* Generic case. */ +enum rtl_tx_desc_bit { + /* First doubleword. */ + TD_LSO = (1 << 27), /* Large Send Offload */ +#define TD_MSS_MAX 0x07ffu /* MSS value */ + + /* Second doubleword. */ + TxVlanTag = (1 << 17), /* Add VLAN tag */ +}; + +/* 8169, 8168b and 810x except 8102e. */ +enum rtl_tx_desc_bit_0 { + /* First doubleword. */ +#define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */ + TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */ + TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */ + TD0_IP_CS = (1 << 18), /* Calculate IP checksum */ +}; + +/* 8102e, 8168c and beyond. */ +enum rtl_tx_desc_bit_1 { + /* Second doubleword. */ +#define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */ + TD1_IP_CS = (1 << 29), /* Calculate IP checksum */ + TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */ + TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */ +}; + +static const struct rtl_tx_desc_info { + struct { + u32 udp; + u32 tcp; + } checksum; + u16 mss_shift; + u16 opts_offset; +} tx_desc_info [] = { + [RTL_TD_0] = { + .checksum = { + .udp = TD0_IP_CS | TD0_UDP_CS, + .tcp = TD0_IP_CS | TD0_TCP_CS + }, + .mss_shift = TD0_MSS_SHIFT, + .opts_offset = 0 + }, + [RTL_TD_1] = { + .checksum = { + .udp = TD1_IP_CS | TD1_UDP_CS, + .tcp = TD1_IP_CS | TD1_TCP_CS + }, + .mss_shift = TD1_MSS_SHIFT, + .opts_offset = 1 + } +}; + +enum rtl_rx_desc_bit { + /* Rx private */ + PID1 = (1 << 18), /* Protocol ID bit 1/2 */ + PID0 = (1 << 17), /* Protocol ID bit 2/2 */ + +#define RxProtoUDP (PID1) +#define RxProtoTCP (PID0) +#define RxProtoIP (PID1 | PID0) +#define RxProtoMask RxProtoIP + + IPFail = (1 << 16), /* IP checksum failed */ + UDPFail = (1 << 15), /* UDP/IP checksum failed */ + TCPFail = (1 << 14), /* TCP/IP checksum failed */ + RxVlanTag = (1 << 16), /* VLAN tag available */ +}; + +#define RsvdMask 0x3fffc000 + +struct TxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct RxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct ring_info { + struct sk_buff *skb; + u32 len; + u8 __pad[sizeof(void *) - sizeof(u32)]; +}; + +enum features { + RTL_FEATURE_WOL = (1 << 0), + RTL_FEATURE_MSI = (1 << 1), + RTL_FEATURE_GMII = (1 << 2), +}; + +struct rtl8169_counters { + __le64 tx_packets; + __le64 rx_packets; + __le64 tx_errors; + __le32 rx_errors; + __le16 rx_missed; + __le16 align_errors; + __le32 tx_one_collision; + __le32 tx_multi_collision; + __le64 rx_unicast; + __le64 rx_broadcast; + __le32 rx_multicast; + __le16 tx_aborted; + __le16 tx_underun; +}; + +struct rtl8169_private { + void __iomem *mmio_addr; /* memory map physical address */ + struct pci_dev *pci_dev; + struct net_device *dev; + struct napi_struct napi; + spinlock_t lock; + u32 msg_enable; + u16 txd_version; + u16 mac_version; + u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */ + u32 cur_tx; /* Index into the Tx descriptor buffer of next Tx pkt. 
*/ + u32 dirty_rx; + u32 dirty_tx; + struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */ + struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */ + dma_addr_t TxPhyAddr; + dma_addr_t RxPhyAddr; + void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */ + struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ + struct timer_list timer; + u16 cp_cmd; + u16 intr_event; + u16 napi_event; + u16 intr_mask; + + struct mdio_ops { + void (*write)(void __iomem *, int, int); + int (*read)(void __iomem *, int); + } mdio_ops; + + struct pll_power_ops { + void (*down)(struct rtl8169_private *); + void (*up)(struct rtl8169_private *); + } pll_power_ops; + + struct jumbo_ops { + void (*enable)(struct rtl8169_private *); + void (*disable)(struct rtl8169_private *); + } jumbo_ops; + + int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv); + int (*get_settings)(struct net_device *, struct ethtool_cmd *); + void (*phy_reset_enable)(struct rtl8169_private *tp); + void (*hw_start)(struct net_device *); + unsigned int (*phy_reset_pending)(struct rtl8169_private *tp); + unsigned int (*link_ok)(void __iomem *); + int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd); + struct delayed_work task; + unsigned features; + + struct mii_if_info mii; + struct rtl8169_counters counters; + u32 saved_wolopts; + u32 opts1_mask; + + struct rtl_fw { + const struct firmware *fw; + +#define RTL_VER_SIZE 32 + + char version[RTL_VER_SIZE]; + + struct rtl_fw_phy_action { + __le32 *code; + size_t size; + } phy_action; + } *rtl_fw; +#define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN) +}; + +MODULE_AUTHOR("Realtek and the Linux r8169 crew "); +MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver"); +module_param(use_dac, int, 0); +MODULE_PARM_DESC(use_dac, "Enable PCI DAC. 
Unsafe on 32 bit PCI slot."); +module_param_named(debug, debug.msg_enable, int, 0); +MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(RTL8169_VERSION); +MODULE_FIRMWARE(FIRMWARE_8168D_1); +MODULE_FIRMWARE(FIRMWARE_8168D_2); +MODULE_FIRMWARE(FIRMWARE_8168E_1); +MODULE_FIRMWARE(FIRMWARE_8168E_2); +MODULE_FIRMWARE(FIRMWARE_8168E_3); +MODULE_FIRMWARE(FIRMWARE_8105E_1); +MODULE_FIRMWARE(FIRMWARE_8168F_1); +MODULE_FIRMWARE(FIRMWARE_8168F_2); + +static int rtl8169_open(struct net_device *dev); +static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, + struct net_device *dev); +static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance); +static int rtl8169_init_ring(struct net_device *dev); +static void rtl_hw_start(struct net_device *dev); +static int rtl8169_close(struct net_device *dev); +static void rtl_set_rx_mode(struct net_device *dev); +static void rtl8169_tx_timeout(struct net_device *dev); +static struct net_device_stats *rtl8169_get_stats(struct net_device *dev); +static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *, + void __iomem *, u32 budget); +static int rtl8169_change_mtu(struct net_device *dev, int new_mtu); +static void rtl8169_down(struct net_device *dev); +static void rtl8169_rx_clear(struct rtl8169_private *tp); +static int rtl8169_poll(struct napi_struct *napi, int budget); + +static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force) +{ + int cap = pci_pcie_cap(pdev); + + if (cap) { + u16 ctl; + + pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl); + ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force; + pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl); + } +} + +static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) +{ + void __iomem *ioaddr = tp->mmio_addr; + int i; + + RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); + for (i = 0; i < 20; i++) { + udelay(100); + if (RTL_R32(OCPAR) & OCPAR_FLAG) + break; + } + return RTL_R32(OCPDR); +} + +static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data) +{ + void __iomem *ioaddr = tp->mmio_addr; + int i; + + RTL_W32(OCPDR, data); + RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); + for (i = 0; i < 20; i++) { + udelay(100); + if ((RTL_R32(OCPAR) & OCPAR_FLAG) == 0) + break; + } +} + +static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd) +{ + void __iomem *ioaddr = tp->mmio_addr; + int i; + + RTL_W8(ERIDR, cmd); + RTL_W32(ERIAR, 0x800010e8); + msleep(2); + for (i = 0; i < 5; i++) { + udelay(100); + if (!(RTL_R32(ERIAR) & ERIAR_FLAG)) + break; + } + + ocp_write(tp, 0x1, 0x30, 0x00000001); +} + +#define OOB_CMD_RESET 0x00 +#define OOB_CMD_DRIVER_START 0x05 +#define OOB_CMD_DRIVER_STOP 0x06 + +static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp) +{ + return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 
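/*
 * The OCP helpers above share one polling idiom: start the access via
 * OCPAR, then spin on OCPAR_FLAG under a bounded udelay() loop (20 x
 * 100us, so 2ms worst case). A distilled sketch of the idiom, with a
 * hypothetical helper name:
 */
static bool rtl_poll_flag(void __iomem *reg, u32 flag, bool want_set,
			  int tries, int delay_us)
{
	while (tries--) {
		udelay(delay_us);
		if (!!(readl(reg) & flag) == want_set)
			return true;	/* flag reached the wanted state */
	}
	return false;	/* timed out; the callers above simply fall through */
}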
0xb8 : 0x10; +} + +static void rtl8168_driver_start(struct rtl8169_private *tp) +{ + u16 reg; + int i; + + rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START); + + reg = rtl8168_get_ocp_reg(tp); + + for (i = 0; i < 10; i++) { + msleep(10); + if (ocp_read(tp, 0x0f, reg) & 0x00000800) + break; + } +} + +static void rtl8168_driver_stop(struct rtl8169_private *tp) +{ + u16 reg; + int i; + + rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP); + + reg = rtl8168_get_ocp_reg(tp); + + for (i = 0; i < 10; i++) { + msleep(10); + if ((ocp_read(tp, 0x0f, reg) & 0x00000800) == 0) + break; + } +} + +static int r8168dp_check_dash(struct rtl8169_private *tp) +{ + u16 reg = rtl8168_get_ocp_reg(tp); + + return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0; +} + +static void r8169_mdio_write(void __iomem *ioaddr, int reg_addr, int value) +{ + int i; + + RTL_W32(PHYAR, 0x80000000 | (reg_addr & 0x1f) << 16 | (value & 0xffff)); + + for (i = 20; i > 0; i--) { + /* + * Check if the RTL8169 has completed writing to the specified + * MII register. + */ + if (!(RTL_R32(PHYAR) & 0x80000000)) + break; + udelay(25); + } + /* + * According to hardware specs a 20us delay is required after write + * complete indication, but before sending next command. + */ + udelay(20); +} + +static int r8169_mdio_read(void __iomem *ioaddr, int reg_addr) +{ + int i, value = -1; + + RTL_W32(PHYAR, 0x0 | (reg_addr & 0x1f) << 16); + + for (i = 20; i > 0; i--) { + /* + * Check if the RTL8169 has completed retrieving data from + * the specified MII register. + */ + if (RTL_R32(PHYAR) & 0x80000000) { + value = RTL_R32(PHYAR) & 0xffff; + break; + } + udelay(25); + } + /* + * According to hardware specs a 20us delay is required after read + * complete indication, but before sending next command. + */ + udelay(20); + + return value; +} + +static void r8168dp_1_mdio_access(void __iomem *ioaddr, int reg_addr, u32 data) +{ + int i; + + RTL_W32(OCPDR, data | + ((reg_addr & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT)); + RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD); + RTL_W32(EPHY_RXER_NUM, 0); + + for (i = 0; i < 100; i++) { + mdelay(1); + if (!(RTL_R32(OCPAR) & OCPAR_FLAG)) + break; + } +} + +static void r8168dp_1_mdio_write(void __iomem *ioaddr, int reg_addr, int value) +{ + r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_WRITE_CMD | + (value & OCPDR_DATA_MASK)); +} + +static int r8168dp_1_mdio_read(void __iomem *ioaddr, int reg_addr) +{ + int i; + + r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_READ_CMD); + + mdelay(1); + RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD); + RTL_W32(EPHY_RXER_NUM, 0); + + for (i = 0; i < 100; i++) { + mdelay(1); + if (RTL_R32(OCPAR) & OCPAR_FLAG) + break; + } + + return RTL_R32(OCPDR) & OCPDR_DATA_MASK; +} + +#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000 + +static void r8168dp_2_mdio_start(void __iomem *ioaddr) +{ + RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT); +} + +static void r8168dp_2_mdio_stop(void __iomem *ioaddr) +{ + RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT); +} + +static void r8168dp_2_mdio_write(void __iomem *ioaddr, int reg_addr, int value) +{ + r8168dp_2_mdio_start(ioaddr); + + r8169_mdio_write(ioaddr, reg_addr, value); + + r8168dp_2_mdio_stop(ioaddr); +} + +static int r8168dp_2_mdio_read(void __iomem *ioaddr, int reg_addr) +{ + int value; + + r8168dp_2_mdio_start(ioaddr); + + value = r8169_mdio_read(ioaddr, reg_addr); + + r8168dp_2_mdio_stop(ioaddr); + + return value; +} + +static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val) +{ + tp->mdio_ops.write(tp->mmio_addr, location, val); +} + +static 
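/*
 * For reference (illustrative; the macro names are invented, the code
 * above uses the raw constants): PHYAR multiplexes a whole MDIO
 * transaction into one 32-bit register. Bit 31 is the write strobe and
 * busy indicator, bits 20:16 carry the 5-bit PHY register address and
 * bits 15:0 the data:
 *
 *	#define PHYAR_BUSY	0x80000000u
 *	#define PHYAR_REG(r)	(((u32)(r) & 0x1f) << 16)
 *	#define PHYAR_DATA(v)	((u32)(v) & 0xffff)
 *
 * A write posts PHYAR_BUSY | PHYAR_REG(r) | PHYAR_DATA(v) and waits for
 * bit 31 to clear; a read posts PHYAR_REG(r) and waits for bit 31 to be
 * set before latching the low 16 bits.
 */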
int rtl_readphy(struct rtl8169_private *tp, int location) +{ + return tp->mdio_ops.read(tp->mmio_addr, location); +} + +static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value) +{ + rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value); +} + +static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m) +{ + int val; + + val = rtl_readphy(tp, reg_addr); + rtl_writephy(tp, reg_addr, (val | p) & ~m); +} + +static void rtl_mdio_write(struct net_device *dev, int phy_id, int location, + int val) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_writephy(tp, location, val); +} + +static int rtl_mdio_read(struct net_device *dev, int phy_id, int location) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + return rtl_readphy(tp, location); +} + +static void rtl_ephy_write(void __iomem *ioaddr, int reg_addr, int value) +{ + unsigned int i; + + RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) | + (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); + + for (i = 0; i < 100; i++) { + if (!(RTL_R32(EPHYAR) & EPHYAR_FLAG)) + break; + udelay(10); + } +} + +static u16 rtl_ephy_read(void __iomem *ioaddr, int reg_addr) +{ + u16 value = 0xffff; + unsigned int i; + + RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); + + for (i = 0; i < 100; i++) { + if (RTL_R32(EPHYAR) & EPHYAR_FLAG) { + value = RTL_R32(EPHYAR) & EPHYAR_DATA_MASK; + break; + } + udelay(10); + } + + return value; +} + +static void rtl_csi_write(void __iomem *ioaddr, int addr, int value) +{ + unsigned int i; + + RTL_W32(CSIDR, value); + RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); + + for (i = 0; i < 100; i++) { + if (!(RTL_R32(CSIAR) & CSIAR_FLAG)) + break; + udelay(10); + } +} + +static u32 rtl_csi_read(void __iomem *ioaddr, int addr) +{ + u32 value = ~0x00; + unsigned int i; + + RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); + + for (i = 0; i < 100; i++) { + if (RTL_R32(CSIAR) & CSIAR_FLAG) { + value = RTL_R32(CSIDR); + break; + } + udelay(10); + } + + return value; +} + +static +void rtl_eri_write(void __iomem *ioaddr, int addr, u32 mask, u32 val, int type) +{ + unsigned int i; + + BUG_ON((addr & 3) || (mask == 0)); + RTL_W32(ERIDR, val); + RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr); + + for (i = 0; i < 100; i++) { + if (!(RTL_R32(ERIAR) & ERIAR_FLAG)) + break; + udelay(100); + } +} + +static u32 rtl_eri_read(void __iomem *ioaddr, int addr, int type) +{ + u32 value = ~0x00; + unsigned int i; + + RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr); + + for (i = 0; i < 100; i++) { + if (RTL_R32(ERIAR) & ERIAR_FLAG) { + value = RTL_R32(ERIDR); + break; + } + udelay(100); + } + + return value; +} + +static void +rtl_w1w0_eri(void __iomem *ioaddr, int addr, u32 mask, u32 p, u32 m, int type) +{ + u32 val; + + val = rtl_eri_read(ioaddr, addr, type); + rtl_eri_write(ioaddr, addr, mask, (val & ~m) | p, type); +} + +struct exgmac_reg { + u16 addr; + u16 mask; + u32 val; +}; + +static void rtl_write_exgmac_batch(void __iomem *ioaddr, + const struct exgmac_reg *r, int len) +{ + while (len-- > 0) { + rtl_eri_write(ioaddr, r->addr, r->mask, r->val, ERIAR_EXGMAC); + r++; + } +} + +static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr) +{ + u8 value = 0xff; + unsigned int i; + + RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT); + + for (i = 0; i < 300; i++) { + if (RTL_R32(EFUSEAR) & EFUSEAR_FLAG) { + value = 
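/*
 * An aside on the read-modify-write helpers above: they are not quite
 * symmetric. rtl_w1w0_phy() computes (val | p) & ~m, so a bit present
 * in both p and m ends up cleared, while rtl_w1w0_eri() computes
 * (val & ~m) | p, where such a bit ends up set. For disjoint masks the
 * two agree: val = 0x00f0, p = 0x000f, m = 0x0f00 yields 0x00ff either
 * way, but p = m = 0x0001 with val = 0 yields 0x0000 from the first
 * form and 0x0001 from the second.
 */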
RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK; + break; + } + udelay(100); + } + + return value; +} + +static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr) +{ + RTL_W16(IntrMask, 0x0000); + + RTL_W16(IntrStatus, 0xffff); +} + +static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(TBICSR) & TBIReset; +} + +static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp) +{ + return rtl_readphy(tp, MII_BMCR) & BMCR_RESET; +} + +static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr) +{ + return RTL_R32(TBICSR) & TBILinkOk; +} + +static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr) +{ + return RTL_R8(PHYstatus) & LinkStatus; +} + +static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset); +} + +static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp) +{ + unsigned int val; + + val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET; + rtl_writephy(tp, MII_BMCR, val & 0xffff); +} + +static void rtl_link_chg_patch(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct net_device *dev = tp->dev; + + if (!netif_running(dev)) + return; + + if (tp->mac_version == RTL_GIGA_MAC_VER_34) { + if (RTL_R8(PHYstatus) & _1000bpsF) { + rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, + 0x00000011, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, + 0x00000005, ERIAR_EXGMAC); + } else if (RTL_R8(PHYstatus) & _100bps) { + rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, + 0x0000001f, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, + 0x00000005, ERIAR_EXGMAC); + } else { + rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, + 0x0000001f, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, + 0x0000003f, ERIAR_EXGMAC); + } + /* Reset packet filter */ + rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, + ERIAR_EXGMAC); + rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, + ERIAR_EXGMAC); + } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 || + tp->mac_version == RTL_GIGA_MAC_VER_36) { + if (RTL_R8(PHYstatus) & _1000bpsF) { + rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, + 0x00000011, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, + 0x00000005, ERIAR_EXGMAC); + } else { + rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, + 0x0000001f, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, + 0x0000003f, ERIAR_EXGMAC); + } + } +} + +static void __rtl8169_check_link_status(struct net_device *dev, + struct rtl8169_private *tp, + void __iomem *ioaddr, bool pm) +{ + unsigned long flags; + + spin_lock_irqsave(&tp->lock, flags); + if (tp->link_ok(ioaddr)) { + rtl_link_chg_patch(tp); + /* This is to cancel a scheduled suspend if there's one. 
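 * For illustration, the runtime PM pairing at work in this function:
 * link-up cancels any pending suspend with
 *
 *	pm_request_resume(&tp->pci_dev->dev);
 *
 * while link-down arms a delayed one with
 *
 *	pm_schedule_suspend(&tp->pci_dev->dev, 100);
 *
 * so a link bounce shorter than the 100ms grace period never actually
 * suspends the device.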
*/ + if (pm) + pm_request_resume(&tp->pci_dev->dev); + netif_carrier_on(dev); + if (net_ratelimit()) + netif_info(tp, ifup, dev, "link up\n"); + } else { + netif_carrier_off(dev); + netif_info(tp, ifdown, dev, "link down\n"); + if (pm) + pm_schedule_suspend(&tp->pci_dev->dev, 100); + } + spin_unlock_irqrestore(&tp->lock, flags); +} + +static void rtl8169_check_link_status(struct net_device *dev, + struct rtl8169_private *tp, + void __iomem *ioaddr) +{ + __rtl8169_check_link_status(dev, tp, ioaddr, false); +} + +#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) + +static u32 __rtl8169_get_wol(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + u8 options; + u32 wolopts = 0; + + options = RTL_R8(Config1); + if (!(options & PMEnable)) + return 0; + + options = RTL_R8(Config3); + if (options & LinkUp) + wolopts |= WAKE_PHY; + if (options & MagicPacket) + wolopts |= WAKE_MAGIC; + + options = RTL_R8(Config5); + if (options & UWF) + wolopts |= WAKE_UCAST; + if (options & BWF) + wolopts |= WAKE_BCAST; + if (options & MWF) + wolopts |= WAKE_MCAST; + + return wolopts; +} + +static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + spin_lock_irq(&tp->lock); + + wol->supported = WAKE_ANY; + wol->wolopts = __rtl8169_get_wol(tp); + + spin_unlock_irq(&tp->lock); +} + +static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) +{ + void __iomem *ioaddr = tp->mmio_addr; + unsigned int i; + static const struct { + u32 opt; + u16 reg; + u8 mask; + } cfg[] = { + { WAKE_ANY, Config1, PMEnable }, + { WAKE_PHY, Config3, LinkUp }, + { WAKE_MAGIC, Config3, MagicPacket }, + { WAKE_UCAST, Config5, UWF }, + { WAKE_BCAST, Config5, BWF }, + { WAKE_MCAST, Config5, MWF }, + { WAKE_ANY, Config5, LanWake } + }; + + RTL_W8(Cfg9346, Cfg9346_Unlock); + + for (i = 0; i < ARRAY_SIZE(cfg); i++) { + u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask; + if (wolopts & cfg[i].opt) + options |= cfg[i].mask; + RTL_W8(cfg[i].reg, options); + } + + RTL_W8(Cfg9346, Cfg9346_Lock); +} + +static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + spin_lock_irq(&tp->lock); + + if (wol->wolopts) + tp->features |= RTL_FEATURE_WOL; + else + tp->features &= ~RTL_FEATURE_WOL; + __rtl8169_set_wol(tp, wol->wolopts); + spin_unlock_irq(&tp->lock); + + device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts); + + return 0; +} + +static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp) +{ + return rtl_chip_infos[tp->mac_version].fw_name; +} + +static void rtl8169_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct rtl_fw *rtl_fw = tp->rtl_fw; + + strcpy(info->driver, MODULENAME); + strcpy(info->version, RTL8169_VERSION); + strcpy(info->bus_info, pci_name(tp->pci_dev)); + BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version)); + strcpy(info->fw_version, IS_ERR_OR_NULL(rtl_fw) ? 
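/*
 * Usage sketch for the WoL path above (hypothetical values, for
 * illustration): the cfg[] table makes __rtl8169_set_wol() table
 * driven, so requesting magic-packet wake only, e.g.
 *
 *	struct ethtool_wolinfo wol = { .wolopts = WAKE_MAGIC };
 *
 *	rtl8169_set_wol(dev, &wol);
 *
 * sets MagicPacket in Config3 plus PMEnable and LanWake through the
 * WAKE_ANY rows, and clears every other wake bit.
 */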
"N/A" : + rtl_fw->version); +} + +static int rtl8169_get_regs_len(struct net_device *dev) +{ + return R8169_REGS_SIZE; +} + +static int rtl8169_set_speed_tbi(struct net_device *dev, + u8 autoneg, u16 speed, u8 duplex, u32 ignored) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + int ret = 0; + u32 reg; + + reg = RTL_R32(TBICSR); + if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) && + (duplex == DUPLEX_FULL)) { + RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart)); + } else if (autoneg == AUTONEG_ENABLE) + RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart); + else { + netif_warn(tp, link, dev, + "incorrect speed setting refused in TBI mode\n"); + ret = -EOPNOTSUPP; + } + + return ret; +} + +static int rtl8169_set_speed_xmii(struct net_device *dev, + u8 autoneg, u16 speed, u8 duplex, u32 adv) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int giga_ctrl, bmcr; + int rc = -EINVAL; + + rtl_writephy(tp, 0x1f, 0x0000); + + if (autoneg == AUTONEG_ENABLE) { + int auto_nego; + + auto_nego = rtl_readphy(tp, MII_ADVERTISE); + auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL | + ADVERTISE_100HALF | ADVERTISE_100FULL); + + if (adv & ADVERTISED_10baseT_Half) + auto_nego |= ADVERTISE_10HALF; + if (adv & ADVERTISED_10baseT_Full) + auto_nego |= ADVERTISE_10FULL; + if (adv & ADVERTISED_100baseT_Half) + auto_nego |= ADVERTISE_100HALF; + if (adv & ADVERTISED_100baseT_Full) + auto_nego |= ADVERTISE_100FULL; + + auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + + giga_ctrl = rtl_readphy(tp, MII_CTRL1000); + giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); + + /* The 8100e/8101e/8102e do Fast Ethernet only. */ + if (tp->mii.supports_gmii) { + if (adv & ADVERTISED_1000baseT_Half) + giga_ctrl |= ADVERTISE_1000HALF; + if (adv & ADVERTISED_1000baseT_Full) + giga_ctrl |= ADVERTISE_1000FULL; + } else if (adv & (ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full)) { + netif_info(tp, link, dev, + "PHY does not support 1000Mbps\n"); + goto out; + } + + bmcr = BMCR_ANENABLE | BMCR_ANRESTART; + + rtl_writephy(tp, MII_ADVERTISE, auto_nego); + rtl_writephy(tp, MII_CTRL1000, giga_ctrl); + } else { + giga_ctrl = 0; + + if (speed == SPEED_10) + bmcr = 0; + else if (speed == SPEED_100) + bmcr = BMCR_SPEED100; + else + goto out; + + if (duplex == DUPLEX_FULL) + bmcr |= BMCR_FULLDPLX; + } + + rtl_writephy(tp, MII_BMCR, bmcr); + + if (tp->mac_version == RTL_GIGA_MAC_VER_02 || + tp->mac_version == RTL_GIGA_MAC_VER_03) { + if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) { + rtl_writephy(tp, 0x17, 0x2138); + rtl_writephy(tp, 0x0e, 0x0260); + } else { + rtl_writephy(tp, 0x17, 0x2108); + rtl_writephy(tp, 0x0e, 0x0000); + } + } + + rc = 0; +out: + return rc; +} + +static int rtl8169_set_speed(struct net_device *dev, + u8 autoneg, u16 speed, u8 duplex, u32 advertising) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int ret; + + ret = tp->set_speed(dev, autoneg, speed, duplex, advertising); + if (ret < 0) + goto out; + + if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) && + (advertising & ADVERTISED_1000baseT_Full)) { + mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT); + } +out: + return ret; +} + +static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct rtl8169_private *tp = netdev_priv(dev); + unsigned long flags; + int ret; + + del_timer_sync(&tp->timer); + + spin_lock_irqsave(&tp->lock, flags); + ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd), + cmd->duplex, 
cmd->advertising); + spin_unlock_irqrestore(&tp->lock, flags); + + return ret; +} + +static u32 rtl8169_fix_features(struct net_device *dev, u32 features) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (dev->mtu > TD_MSS_MAX) + features &= ~NETIF_F_ALL_TSO; + + if (dev->mtu > JUMBO_1K && + !rtl_chip_infos[tp->mac_version].jumbo_tx_csum) + features &= ~NETIF_F_IP_CSUM; + + return features; +} + +static int rtl8169_set_features(struct net_device *dev, u32 features) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + unsigned long flags; + + spin_lock_irqsave(&tp->lock, flags); + + if (features & NETIF_F_RXCSUM) + tp->cp_cmd |= RxChkSum; + else + tp->cp_cmd &= ~RxChkSum; + + if (dev->features & NETIF_F_HW_VLAN_RX) + tp->cp_cmd |= RxVlan; + else + tp->cp_cmd &= ~RxVlan; + + RTL_W16(CPlusCmd, tp->cp_cmd); + RTL_R16(CPlusCmd); + + spin_unlock_irqrestore(&tp->lock, flags); + + return 0; +} + +static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp, + struct sk_buff *skb) +{ + return (vlan_tx_tag_present(skb)) ? + TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00; +} + +static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb) +{ + u32 opts2 = le32_to_cpu(desc->opts2); + + if (opts2 & RxVlanTag) + __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff)); + + desc->opts2 = 0; +} + +static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + u32 status; + + cmd->supported = + SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE; + cmd->port = PORT_FIBRE; + cmd->transceiver = XCVR_INTERNAL; + + status = RTL_R32(TBICSR); + cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0; + cmd->autoneg = !!(status & TBINwEnable); + + ethtool_cmd_speed_set(cmd, SPEED_1000); + cmd->duplex = DUPLEX_FULL; /* Always set */ + + return 0; +} + +static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + return mii_ethtool_gset(&tp->mii, cmd); +} + +static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct rtl8169_private *tp = netdev_priv(dev); + unsigned long flags; + int rc; + + spin_lock_irqsave(&tp->lock, flags); + + rc = tp->get_settings(dev, cmd); + + spin_unlock_irqrestore(&tp->lock, flags); + return rc; +} + +static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) +{ + struct rtl8169_private *tp = netdev_priv(dev); + unsigned long flags; + + if (regs->len > R8169_REGS_SIZE) + regs->len = R8169_REGS_SIZE; + + spin_lock_irqsave(&tp->lock, flags); + memcpy_fromio(p, tp->mmio_addr, regs->len); + spin_unlock_irqrestore(&tp->lock, flags); +} + +static u32 rtl8169_get_msglevel(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + return tp->msg_enable; +} + +static void rtl8169_set_msglevel(struct net_device *dev, u32 value) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + tp->msg_enable = value; +} + +static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = { + "tx_packets", + "rx_packets", + "tx_errors", + "rx_errors", + "rx_missed", + "align_errors", + "tx_single_collisions", + "tx_multi_collisions", + "unicast", + "broadcast", + "multicast", + "tx_aborted", + "tx_underrun", +}; + +static int rtl8169_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(rtl8169_gstrings); + default: + 
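/*
 * Byte order note on the VLAN helpers above: the chip keeps the 16-bit
 * VLAN tag big-endian inside the little-endian opts2 word, hence the
 * swab16() in both directions; a TCI of 0x0123 is written as 0x2301
 * into opts2 and swapped back when extracted on receive.
 */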
return -EOPNOTSUPP; + } +} + +static void rtl8169_update_counters(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + struct device *d = &tp->pci_dev->dev; + struct rtl8169_counters *counters; + dma_addr_t paddr; + u32 cmd; + int wait = 1000; + + /* + * Some chips are unable to dump tally counters when the receiver + * is disabled. + */ + if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0) + return; + + counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL); + if (!counters) + return; + + RTL_W32(CounterAddrHigh, (u64)paddr >> 32); + cmd = (u64)paddr & DMA_BIT_MASK(32); + RTL_W32(CounterAddrLow, cmd); + RTL_W32(CounterAddrLow, cmd | CounterDump); + + while (wait--) { + if ((RTL_R32(CounterAddrLow) & CounterDump) == 0) { + memcpy(&tp->counters, counters, sizeof(*counters)); + break; + } + udelay(10); + } + + RTL_W32(CounterAddrLow, 0); + RTL_W32(CounterAddrHigh, 0); + + dma_free_coherent(d, sizeof(*counters), counters, paddr); +} + +static void rtl8169_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + ASSERT_RTNL(); + + rtl8169_update_counters(dev); + + data[0] = le64_to_cpu(tp->counters.tx_packets); + data[1] = le64_to_cpu(tp->counters.rx_packets); + data[2] = le64_to_cpu(tp->counters.tx_errors); + data[3] = le32_to_cpu(tp->counters.rx_errors); + data[4] = le16_to_cpu(tp->counters.rx_missed); + data[5] = le16_to_cpu(tp->counters.align_errors); + data[6] = le32_to_cpu(tp->counters.tx_one_collision); + data[7] = le32_to_cpu(tp->counters.tx_multi_collision); + data[8] = le64_to_cpu(tp->counters.rx_unicast); + data[9] = le64_to_cpu(tp->counters.rx_broadcast); + data[10] = le32_to_cpu(tp->counters.rx_multicast); + data[11] = le16_to_cpu(tp->counters.tx_aborted); + data[12] = le16_to_cpu(tp->counters.tx_underun); +} + +static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + switch(stringset) { + case ETH_SS_STATS: + memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings)); + break; + } +} + +static const struct ethtool_ops rtl8169_ethtool_ops = { + .get_drvinfo = rtl8169_get_drvinfo, + .get_regs_len = rtl8169_get_regs_len, + .get_link = ethtool_op_get_link, + .get_settings = rtl8169_get_settings, + .set_settings = rtl8169_set_settings, + .get_msglevel = rtl8169_get_msglevel, + .set_msglevel = rtl8169_set_msglevel, + .get_regs = rtl8169_get_regs, + .get_wol = rtl8169_get_wol, + .set_wol = rtl8169_set_wol, + .get_strings = rtl8169_get_strings, + .get_sset_count = rtl8169_get_sset_count, + .get_ethtool_stats = rtl8169_get_ethtool_stats, +}; + +static void rtl8169_get_mac_version(struct rtl8169_private *tp, + struct net_device *dev, u8 default_version) +{ + void __iomem *ioaddr = tp->mmio_addr; + /* + * The driver currently handles the 8168Bf and the 8168Be identically + * but they can be identified more specifically through the test below + * if needed: + * + * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be + * + * Same thing for the 8101Eb and the 8101Ec: + * + * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec + */ + static const struct rtl_mac_info { + u32 mask; + u32 val; + int mac_version; + } mac_info[] = { + /* 8168F family. */ + { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 }, + { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35 }, + + /* 8168E family. 
*/ + { 0x7c800000, 0x2c800000, RTL_GIGA_MAC_VER_34 }, + { 0x7cf00000, 0x2c200000, RTL_GIGA_MAC_VER_33 }, + { 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32 }, + { 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33 }, + + /* 8168D family. */ + { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 }, + { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 }, + { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 }, + + /* 8168DP family. */ + { 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27 }, + { 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28 }, + { 0x7cf00000, 0x28b00000, RTL_GIGA_MAC_VER_31 }, + + /* 8168C family. */ + { 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24 }, + { 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 }, + { 0x7cf00000, 0x3c800000, RTL_GIGA_MAC_VER_18 }, + { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_24 }, + { 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19 }, + { 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20 }, + { 0x7cf00000, 0x3c300000, RTL_GIGA_MAC_VER_21 }, + { 0x7cf00000, 0x3c400000, RTL_GIGA_MAC_VER_22 }, + { 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_22 }, + + /* 8168B family. */ + { 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12 }, + { 0x7cf00000, 0x38500000, RTL_GIGA_MAC_VER_17 }, + { 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17 }, + { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 }, + + /* 8101 family. */ + { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 }, + { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 }, + { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 }, + { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 }, + { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 }, + { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 }, + { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 }, + { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08 }, + { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07 }, + { 0x7cf00000, 0x24800000, RTL_GIGA_MAC_VER_07 }, + { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 }, + { 0x7cf00000, 0x34300000, RTL_GIGA_MAC_VER_10 }, + { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 }, + { 0x7c800000, 0x34800000, RTL_GIGA_MAC_VER_09 }, + { 0x7c800000, 0x24800000, RTL_GIGA_MAC_VER_09 }, + { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 }, + /* FIXME: where did these entries come from ? -- FR */ + { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 }, + { 0xfc800000, 0x30800000, RTL_GIGA_MAC_VER_14 }, + + /* 8110 family. 
*/ + { 0xfc800000, 0x98000000, RTL_GIGA_MAC_VER_06 }, + { 0xfc800000, 0x18000000, RTL_GIGA_MAC_VER_05 }, + { 0xfc800000, 0x10000000, RTL_GIGA_MAC_VER_04 }, + { 0xfc800000, 0x04000000, RTL_GIGA_MAC_VER_03 }, + { 0xfc800000, 0x00800000, RTL_GIGA_MAC_VER_02 }, + { 0xfc800000, 0x00000000, RTL_GIGA_MAC_VER_01 }, + + /* Catch-all */ + { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE } + }; + const struct rtl_mac_info *p = mac_info; + u32 reg; + + reg = RTL_R32(TxConfig); + while ((reg & p->mask) != p->val) + p++; + tp->mac_version = p->mac_version; + + if (tp->mac_version == RTL_GIGA_MAC_NONE) { + netif_notice(tp, probe, dev, + "unknown MAC, using family default\n"); + tp->mac_version = default_version; + } +} + +static void rtl8169_print_mac_version(struct rtl8169_private *tp) +{ + dprintk("mac_version = 0x%02x\n", tp->mac_version); +} + +struct phy_reg { + u16 reg; + u16 val; +}; + +static void rtl_writephy_batch(struct rtl8169_private *tp, + const struct phy_reg *regs, int len) +{ + while (len-- > 0) { + rtl_writephy(tp, regs->reg, regs->val); + regs++; + } +} + +#define PHY_READ 0x00000000 +#define PHY_DATA_OR 0x10000000 +#define PHY_DATA_AND 0x20000000 +#define PHY_BJMPN 0x30000000 +#define PHY_READ_EFUSE 0x40000000 +#define PHY_READ_MAC_BYTE 0x50000000 +#define PHY_WRITE_MAC_BYTE 0x60000000 +#define PHY_CLEAR_READCOUNT 0x70000000 +#define PHY_WRITE 0x80000000 +#define PHY_READCOUNT_EQ_SKIP 0x90000000 +#define PHY_COMP_EQ_SKIPN 0xa0000000 +#define PHY_COMP_NEQ_SKIPN 0xb0000000 +#define PHY_WRITE_PREVIOUS 0xc0000000 +#define PHY_SKIPN 0xd0000000 +#define PHY_DELAY_MS 0xe0000000 +#define PHY_WRITE_ERI_WORD 0xf0000000 + +struct fw_info { + u32 magic; + char version[RTL_VER_SIZE]; + __le32 fw_start; + __le32 fw_len; + u8 chksum; +} __packed; + +#define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code)) + +static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) +{ + const struct firmware *fw = rtl_fw->fw; + struct fw_info *fw_info = (struct fw_info *)fw->data; + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + char *version = rtl_fw->version; + bool rc = false; + + if (fw->size < FW_OPCODE_SIZE) + goto out; + + if (!fw_info->magic) { + size_t i, size, start; + u8 checksum = 0; + + if (fw->size < sizeof(*fw_info)) + goto out; + + for (i = 0; i < fw->size; i++) + checksum += fw->data[i]; + if (checksum != 0) + goto out; + + start = le32_to_cpu(fw_info->fw_start); + if (start > fw->size) + goto out; + + size = le32_to_cpu(fw_info->fw_len); + if (size > (fw->size - start) / FW_OPCODE_SIZE) + goto out; + + memcpy(version, fw_info->version, RTL_VER_SIZE); + + pa->code = (__le32 *)(fw->data + start); + pa->size = size; + } else { + if (fw->size % FW_OPCODE_SIZE) + goto out; + + strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE); + + pa->code = (__le32 *)fw->data; + pa->size = fw->size / FW_OPCODE_SIZE; + } + version[RTL_VER_SIZE - 1] = 0; + + rc = true; +out: + return rc; +} + +static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev, + struct rtl_fw_phy_action *pa) +{ + bool rc = false; + size_t index; + + for (index = 0; index < pa->size; index++) { + u32 action = le32_to_cpu(pa->code[index]); + u32 regno = (action & 0x0fff0000) >> 16; + + switch(action & 0xf0000000) { + case PHY_READ: + case PHY_DATA_OR: + case PHY_DATA_AND: + case PHY_READ_EFUSE: + case PHY_CLEAR_READCOUNT: + case PHY_WRITE: + case PHY_WRITE_PREVIOUS: + case PHY_DELAY_MS: + break; + + case PHY_BJMPN: + if (regno > index) { + netif_err(tp, ifup, tp->dev, + 
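/*
 * Layout of each firmware action word validated here, matching the
 * masks used throughout: the opcode sits in bits 31:28 (the PHY_*
 * values above), a register/operand number in bits 27:16 and immediate
 * data in bits 15:0, i.e.
 *
 *	opcode = action & 0xf0000000;
 *	regno  = (action & 0x0fff0000) >> 16;
 *	data   = action & 0x0000ffff;
 */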
"Out of range of firmware\n"); + goto out; + } + break; + case PHY_READCOUNT_EQ_SKIP: + if (index + 2 >= pa->size) { + netif_err(tp, ifup, tp->dev, + "Out of range of firmware\n"); + goto out; + } + break; + case PHY_COMP_EQ_SKIPN: + case PHY_COMP_NEQ_SKIPN: + case PHY_SKIPN: + if (index + 1 + regno >= pa->size) { + netif_err(tp, ifup, tp->dev, + "Out of range of firmware\n"); + goto out; + } + break; + + case PHY_READ_MAC_BYTE: + case PHY_WRITE_MAC_BYTE: + case PHY_WRITE_ERI_WORD: + default: + netif_err(tp, ifup, tp->dev, + "Invalid action 0x%08x\n", action); + goto out; + } + } + rc = true; +out: + return rc; +} + +static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) +{ + struct net_device *dev = tp->dev; + int rc = -EINVAL; + + if (!rtl_fw_format_ok(tp, rtl_fw)) { + netif_err(tp, ifup, dev, "invalid firwmare\n"); + goto out; + } + + if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action)) + rc = 0; +out: + return rc; +} + +static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) +{ + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + u32 predata, count; + size_t index; + + predata = count = 0; + + for (index = 0; index < pa->size; ) { + u32 action = le32_to_cpu(pa->code[index]); + u32 data = action & 0x0000ffff; + u32 regno = (action & 0x0fff0000) >> 16; + + if (!action) + break; + + switch(action & 0xf0000000) { + case PHY_READ: + predata = rtl_readphy(tp, regno); + count++; + index++; + break; + case PHY_DATA_OR: + predata |= data; + index++; + break; + case PHY_DATA_AND: + predata &= data; + index++; + break; + case PHY_BJMPN: + index -= regno; + break; + case PHY_READ_EFUSE: + predata = rtl8168d_efuse_read(tp->mmio_addr, regno); + index++; + break; + case PHY_CLEAR_READCOUNT: + count = 0; + index++; + break; + case PHY_WRITE: + rtl_writephy(tp, regno, data); + index++; + break; + case PHY_READCOUNT_EQ_SKIP: + index += (count == data) ? 2 : 1; + break; + case PHY_COMP_EQ_SKIPN: + if (predata == data) + index += regno; + index++; + break; + case PHY_COMP_NEQ_SKIPN: + if (predata != data) + index += regno; + index++; + break; + case PHY_WRITE_PREVIOUS: + rtl_writephy(tp, regno, predata); + index++; + break; + case PHY_SKIPN: + index += regno + 1; + break; + case PHY_DELAY_MS: + mdelay(data); + index++; + break; + + case PHY_READ_MAC_BYTE: + case PHY_WRITE_MAC_BYTE: + case PHY_WRITE_ERI_WORD: + default: + BUG(); + } + } +} + +static void rtl_release_firmware(struct rtl8169_private *tp) +{ + if (!IS_ERR_OR_NULL(tp->rtl_fw)) { + release_firmware(tp->rtl_fw->fw); + kfree(tp->rtl_fw); + } + tp->rtl_fw = RTL_FIRMWARE_UNKNOWN; +} + +static void rtl_apply_firmware(struct rtl8169_private *tp) +{ + struct rtl_fw *rtl_fw = tp->rtl_fw; + + /* TODO: release firmware once rtl_phy_write_fw signals failures. 
*/ + if (!IS_ERR_OR_NULL(rtl_fw)) + rtl_phy_write_fw(tp, rtl_fw); +} + +static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val) +{ + if (rtl_readphy(tp, reg) != val) + netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n"); + else + rtl_apply_firmware(tp); +} + +static void rtl8169s_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x06, 0x006e }, + { 0x08, 0x0708 }, + { 0x15, 0x4000 }, + { 0x18, 0x65c7 }, + + { 0x1f, 0x0001 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x0000 }, + + { 0x03, 0xff41 }, + { 0x02, 0xdf60 }, + { 0x01, 0x0140 }, + { 0x00, 0x0077 }, + { 0x04, 0x7800 }, + { 0x04, 0x7000 }, + + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf0f9 }, + { 0x04, 0x9800 }, + { 0x04, 0x9000 }, + + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xa000 }, + + { 0x03, 0xff41 }, + { 0x02, 0xdf20 }, + { 0x01, 0x0140 }, + { 0x00, 0x00bb }, + { 0x04, 0xb800 }, + { 0x04, 0xb000 }, + + { 0x03, 0xdf41 }, + { 0x02, 0xdc60 }, + { 0x01, 0x6340 }, + { 0x00, 0x007d }, + { 0x04, 0xd800 }, + { 0x04, 0xd000 }, + + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x100a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0xf000 }, + + { 0x1f, 0x0000 }, + { 0x0b, 0x0000 }, + { 0x00, 0x9200 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x01, 0x90d0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp) +{ + struct pci_dev *pdev = tp->pci_dev; + + if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) || + (pdev->subsystem_device != 0xe000)) + return; + + rtl_writephy(tp, 0x1f, 0x0001); + rtl_writephy(tp, 0x10, 0xf01b); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x04, 0x0000 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x9000 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0xa000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x10, 0xf41b }, + { 0x14, 0xfb54 }, + { 0x18, 0xf5c7 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl8169scd_hw_phy_config_quirk(tp); +} + +static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x04, 0x0000 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x9000 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0xa000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 
0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x0b, 0x8480 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x18, 0x67c7 }, + { 0x04, 0x2000 }, + { 0x03, 0x002f }, + { 0x02, 0x4360 }, + { 0x01, 0x0109 }, + { 0x00, 0x3022 }, + { 0x04, 0x2800 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x10, 0xf41b }, + { 0x1f, 0x0000 } + }; + + rtl_writephy(tp, 0x1f, 0x0001); + rtl_patchphy(tp, 0x16, 1 << 0); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x10, 0xf41b }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0000 }, + { 0x1d, 0x0f00 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x1ec8 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy(tp, 0x1f, 0x0000); + rtl_patchphy(tp, 0x14, 1 << 5); + rtl_patchphy(tp, 0x0d, 1 << 5); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x1f, 0x0002 }, + { 0x00, 0x88d4 }, + { 0x01, 0x82b1 }, + { 0x03, 0x7002 }, + { 0x08, 0x9e30 }, + { 0x09, 0x01f0 }, + { 0x0a, 0x5500 }, + { 0x0c, 0x00c8 }, + { 0x1f, 0x0003 }, + { 0x12, 0xc096 }, + { 0x16, 0x000a }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0000 }, + { 0x09, 0x2000 }, + { 0x09, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl_patchphy(tp, 0x14, 1 << 5); + rtl_patchphy(tp, 0x0d, 1 << 5); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0x9000 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x7eb8 }, + { 0x06, 0x0761 }, + { 0x1f, 0x0003 }, + { 0x16, 0x0f0a }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl_patchphy(tp, 0x16, 1 << 0); + rtl_patchphy(tp, 0x14, 1 << 5); + rtl_patchphy(tp, 0x0d, 1 << 5); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x7eb8 }, + { 0x06, 0x5461 }, + { 0x1f, 0x0003 }, + { 0x16, 0x0f0a }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl_patchphy(tp, 0x16, 1 << 0); + rtl_patchphy(tp, 0x14, 1 << 5); + rtl_patchphy(tp, 0x0d, 1 << 5); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void 
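/*
 * A convention shared by all the phy_reg_init[] tables here: writing
 * PHY register 0x1f selects a vendor register page, so every
 * { 0x1f, n } row switches the page that the following rows target,
 * and tables normally finish with { 0x1f, 0x0000 } to drop back to
 * page 0. Open-coded, one pair from rtl8168bef_hw_phy_config() above
 * would read:
 *
 *	rtl_writephy(tp, 0x1f, 0x0001);		select page 1
 *	rtl_writephy(tp, 0x10, 0xf41b);		write reg 0x10 there
 */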
rtl8168c_4_hw_phy_config(struct rtl8169_private *tp) +{ + rtl8168c_3_hw_phy_config(tp); +} + +static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init_0[] = { + /* Channel Estimation */ + { 0x1f, 0x0001 }, + { 0x06, 0x4064 }, + { 0x07, 0x2863 }, + { 0x08, 0x059c }, + { 0x09, 0x26b4 }, + { 0x0a, 0x6a19 }, + { 0x0b, 0xdcc8 }, + { 0x10, 0xf06d }, + { 0x14, 0x7f68 }, + { 0x18, 0x7fd9 }, + { 0x1c, 0xf0ff }, + { 0x1d, 0x3d9c }, + { 0x1f, 0x0003 }, + { 0x12, 0xf49f }, + { 0x13, 0x070b }, + { 0x1a, 0x05ad }, + { 0x14, 0x94c0 }, + + /* + * Tx Error Issue + * Enhance line driver power + */ + { 0x1f, 0x0002 }, + { 0x06, 0x5561 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8332 }, + { 0x06, 0x5561 }, + + /* + * Can not link to 1Gbps with bad cable + * Decrease SNR threshold form 21.07dB to 19.04dB + */ + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 } + }; + void __iomem *ioaddr = tp->mmio_addr; + + rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); + + /* + * Rx Error Issue + * Fine Tune Switching regulator parameter + */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef); + rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00); + + if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x669a }, + { 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x669a }, + { 0x1f, 0x0002 } + }; + int val; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + val = rtl_readphy(tp, 0x0d); + + if ((val & 0x00ff) != 0x006c) { + static const u32 set[] = { + 0x0065, 0x0066, 0x0067, 0x0068, + 0x0069, 0x006a, 0x006b, 0x006c + }; + int i; + + rtl_writephy(tp, 0x1f, 0x0002); + + val &= 0xff00; + for (i = 0; i < ARRAY_SIZE(set); i++) + rtl_writephy(tp, 0x0d, val | set[i]); + } + } else { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x6662 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x6662 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + } + + /* RSET couple improve */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_patchphy(tp, 0x0d, 0x0300); + rtl_patchphy(tp, 0x0f, 0x0010); + + /* Fine tune PLL performance */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600); + rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000); + + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x001b); + + rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00); + + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init_0[] = { + /* Channel Estimation */ + { 0x1f, 0x0001 }, + { 0x06, 0x4064 }, + { 0x07, 0x2863 }, + { 0x08, 0x059c }, + { 0x09, 0x26b4 }, + { 0x0a, 0x6a19 }, + { 0x0b, 0xdcc8 }, + { 0x10, 0xf06d }, + { 0x14, 0x7f68 }, + { 0x18, 0x7fd9 }, + { 0x1c, 0xf0ff }, + { 0x1d, 0x3d9c }, + { 0x1f, 0x0003 }, + { 0x12, 0xf49f }, + { 0x13, 0x070b }, + { 0x1a, 0x05ad }, + { 0x14, 0x94c0 }, + + /* + * Tx Error Issue + * Enhance line driver power + */ + { 0x1f, 0x0002 }, + { 0x06, 0x5561 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8332 }, + { 0x06, 0x5561 }, + + /* + * Can not link to 1Gbps with bad cable + * Decrease SNR threshold form 21.07dB to 19.04dB + */ + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 } + }; + void __iomem *ioaddr = tp->mmio_addr; + + rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); + + if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) { + 
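		/*
		 * Aside, for orientation (an interpretation, not stated by
		 * the commit): efuse byte 0x01 apparently distinguishes two
		 * 8168d silicon flavours. Parts reading 0xb1 get the 0x669a
		 * parameter set below and, when the low byte of PHY reg 0x0d
		 * is not already 0x6c, a sweep stepping it through
		 * 0x65..0x6c; other parts take the alternate parameter set
		 * in the else branch.
		 */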
static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x669a }, + { 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x669a }, + + { 0x1f, 0x0002 } + }; + int val; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + val = rtl_readphy(tp, 0x0d); + if ((val & 0x00ff) != 0x006c) { + static const u32 set[] = { + 0x0065, 0x0066, 0x0067, 0x0068, + 0x0069, 0x006a, 0x006b, 0x006c + }; + int i; + + rtl_writephy(tp, 0x1f, 0x0002); + + val &= 0xff00; + for (i = 0; i < ARRAY_SIZE(set); i++) + rtl_writephy(tp, 0x0d, val | set[i]); + } + } else { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x2642 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x2642 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + } + + /* Fine tune PLL performance */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600); + rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000); + + /* Switching regulator Slew rate */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_patchphy(tp, 0x0f, 0x0017); + + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x001b); + + rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300); + + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x10, 0x0008 }, + { 0x0d, 0x006c }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0001 }, + { 0x0b, 0xa4d8 }, + { 0x09, 0x281c }, + { 0x07, 0x2883 }, + { 0x0a, 0x6b35 }, + { 0x1d, 0x3da4 }, + { 0x1c, 0xeffd }, + { 0x14, 0x7f52 }, + { 0x18, 0x7fc6 }, + { 0x08, 0x0601 }, + { 0x06, 0x4063 }, + { 0x10, 0xf074 }, + { 0x1f, 0x0003 }, + { 0x13, 0x0789 }, + { 0x12, 0xf4bd }, + { 0x1a, 0x04fd }, + { 0x14, 0x84b0 }, + { 0x1f, 0x0000 }, + { 0x00, 0x9200 }, + + { 0x1f, 0x0005 }, + { 0x01, 0x0340 }, + { 0x1f, 0x0001 }, + { 0x04, 0x4000 }, + { 0x03, 0x1d21 }, + { 0x02, 0x0c32 }, + { 0x01, 0x0200 }, + { 0x00, 0x5554 }, + { 0x04, 0x4800 }, + { 0x04, 0x4000 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0xf000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0007 }, + { 0x1e, 0x0023 }, + { 0x16, 0x0000 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0007 }, + { 0x1e, 0x002d }, + { 0x18, 0x0040 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + rtl_patchphy(tp, 0x0d, 1 << 5); +} + +static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + /* Enable Delay cap */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b80 }, + { 0x06, 0xc896 }, + { 0x1f, 0x0000 }, + + /* Channel estimation fine tune */ + { 0x1f, 0x0001 }, + { 0x0b, 0x6c20 }, + { 0x07, 0x2872 }, + { 0x1c, 0xefff }, + { 0x1f, 0x0003 }, + { 0x14, 0x6420 }, + { 0x1f, 0x0000 }, + + /* Update PFM & 10M TX idle timer */ + { 0x1f, 0x0007 }, + { 0x1e, 0x002f }, + { 0x15, 0x1919 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0007 }, + { 0x1e, 0x00ac }, + { 0x18, 0x0006 }, + { 0x1f, 0x0000 } + }; + + rtl_apply_firmware(tp); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + /* DCO enable for 10M IDLE Power */ + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 
0x0023); + rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* For impedance matching */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00); + rtl_writephy(tp, 0x1f, 0x0000); + + /* PHY auto speed down */ + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x002d); + rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000); + + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b86); + rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000); + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x0020); + rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100); + rtl_writephy(tp, 0x1f, 0x0006); + rtl_writephy(tp, 0x00, 0x5a00); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x0d, 0x0007); + rtl_writephy(tp, 0x0e, 0x003c); + rtl_writephy(tp, 0x0d, 0x4007); + rtl_writephy(tp, 0x0e, 0x0000); + rtl_writephy(tp, 0x0d, 0x0000); +} + +static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + /* Enable Delay cap */ + { 0x1f, 0x0004 }, + { 0x1f, 0x0007 }, + { 0x1e, 0x00ac }, + { 0x18, 0x0006 }, + { 0x1f, 0x0002 }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0000 }, + + /* Channel estimation fine tune */ + { 0x1f, 0x0003 }, + { 0x09, 0xa20f }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0000 }, + + /* Green Setting */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b5b }, + { 0x06, 0x9222 }, + { 0x05, 0x8b6d }, + { 0x06, 0x8000 }, + { 0x05, 0x8b76 }, + { 0x06, 0x8000 }, + { 0x1f, 0x0000 } + }; + + rtl_apply_firmware(tp); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + /* For 4-corner performance improve */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b80); + rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* PHY auto speed down */ + rtl_writephy(tp, 0x1f, 0x0004); + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x002d); + rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000); + rtl_writephy(tp, 0x1f, 0x0002); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000); + + /* improve 10M EEE waveform */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b86); + rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Improve 2-pair detection performance */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* EEE setting */ + rtl_w1w0_eri(tp->mmio_addr, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, + ERIAR_EXGMAC); + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000); + rtl_writephy(tp, 0x1f, 0x0004); + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x0020); - rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100); ++ rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100); + rtl_writephy(tp, 0x1f, 0x0002); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x0d, 0x0007); + rtl_writephy(tp, 0x0e, 0x003c); + rtl_writephy(tp, 0x0d, 0x4007); + rtl_writephy(tp, 0x0e, 0x0000); + rtl_writephy(tp, 0x0d, 0x0000); + + /* Green feature */ + rtl_writephy(tp, 0x1f, 0x0003); + rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001); + rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] 
= { + /* Channel estimation fine tune */ + { 0x1f, 0x0003 }, + { 0x09, 0xa20f }, + { 0x1f, 0x0000 }, + + /* Modify green table for giga & fnet */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b55 }, + { 0x06, 0x0000 }, + { 0x05, 0x8b5e }, + { 0x06, 0x0000 }, + { 0x05, 0x8b67 }, + { 0x06, 0x0000 }, + { 0x05, 0x8b70 }, + { 0x06, 0x0000 }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0007 }, + { 0x1e, 0x0078 }, + { 0x17, 0x0000 }, + { 0x19, 0x00fb }, + { 0x1f, 0x0000 }, + + /* Modify green table for 10M */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b79 }, + { 0x06, 0xaa00 }, + { 0x1f, 0x0000 }, + + /* Disable hiimpedance detection (RTCT) */ + { 0x1f, 0x0003 }, + { 0x01, 0x328a }, + { 0x1f, 0x0000 } + }; + + rtl_apply_firmware(tp); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + /* For 4-corner performance improve */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b80); + rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* PHY auto speed down */ + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x002d); + rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000); + + /* Improve 10M EEE waveform */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b86); + rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Improve 2-pair detection performance */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp) +{ + rtl_apply_firmware(tp); + + /* For 4-corner performance improve */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b80); + rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* PHY auto speed down */ + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x002d); + rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000); + + /* Improve 10M EEE waveform */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b86); + rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8102e_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0003 }, + { 0x08, 0x441d }, + { 0x01, 0x9100 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy(tp, 0x1f, 0x0000); + rtl_patchphy(tp, 0x11, 1 << 12); + rtl_patchphy(tp, 0x19, 1 << 13); + rtl_patchphy(tp, 0x10, 1 << 15); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8105e_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0005 }, + { 0x1a, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0004 }, + { 0x1c, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x15, 0x7701 }, + { 0x1f, 0x0000 } + }; + + /* Disable ALDPS before ram code */ + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x18, 0x0310); + msleep(100); + + rtl_apply_firmware(tp); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl_hw_phy_config(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_print_mac_version(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_01: + break; + case RTL_GIGA_MAC_VER_02: + case RTL_GIGA_MAC_VER_03: + rtl8169s_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_04: + rtl8169sb_hw_phy_config(tp); + break; + case 
RTL_GIGA_MAC_VER_05: + rtl8169scd_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_06: + rtl8169sce_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_07: + case RTL_GIGA_MAC_VER_08: + case RTL_GIGA_MAC_VER_09: + rtl8102e_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_11: + rtl8168bb_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_12: + rtl8168bef_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_17: + rtl8168bef_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_18: + rtl8168cp_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_19: + rtl8168c_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_20: + rtl8168c_2_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_21: + rtl8168c_3_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_22: + rtl8168c_4_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + rtl8168cp_2_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_25: + rtl8168d_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_26: + rtl8168d_2_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_27: + rtl8168d_3_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_28: + rtl8168d_4_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_29: + case RTL_GIGA_MAC_VER_30: + rtl8105e_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_31: + /* None. */ + break; + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + rtl8168e_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_34: + rtl8168e_2_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_35: + rtl8168f_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_36: + rtl8168f_2_hw_phy_config(tp); + break; + + default: + break; + } +} + +static void rtl8169_phy_timer(unsigned long __opaque) +{ + struct net_device *dev = (struct net_device *)__opaque; + struct rtl8169_private *tp = netdev_priv(dev); + struct timer_list *timer = &tp->timer; + void __iomem *ioaddr = tp->mmio_addr; + unsigned long timeout = RTL8169_PHY_TIMEOUT; + + assert(tp->mac_version > RTL_GIGA_MAC_VER_01); + + spin_lock_irq(&tp->lock); + + if (tp->phy_reset_pending(tp)) { + /* + * A busy loop could burn quite a few cycles on nowadays CPU. + * Let's delay the execution of the timer for a few ticks. + */ + timeout = HZ/10; + goto out_mod_timer; + } + + if (tp->link_ok(ioaddr)) + goto out_unlock; + + netif_warn(tp, link, dev, "PHY reset until link up\n"); + + tp->phy_reset_enable(tp); + +out_mod_timer: + mod_timer(timer, jiffies + timeout); +out_unlock: + spin_unlock_irq(&tp->lock); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. 
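 * The idiom is simply: mask the line, run the ISR synchronously, then
 * unmask, i.e.
 *
 *	disable_irq(pdev->irq);
 *	rtl8169_interrupt(pdev->irq, dev);
 *	enable_irq(pdev->irq);
 *
 * which is exactly what the function below does.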
+ */ +static void rtl8169_netpoll(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + + disable_irq(pdev->irq); + rtl8169_interrupt(pdev->irq, dev); + enable_irq(pdev->irq); +} +#endif + +static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev, + void __iomem *ioaddr) +{ + iounmap(ioaddr); + pci_release_regions(pdev); + pci_clear_mwi(pdev); + pci_disable_device(pdev); + free_netdev(dev); +} + +static void rtl8169_phy_reset(struct net_device *dev, + struct rtl8169_private *tp) +{ + unsigned int i; + + tp->phy_reset_enable(tp); + for (i = 0; i < 100; i++) { + if (!tp->phy_reset_pending(tp)) + return; + msleep(1); + } + netif_err(tp, link, dev, "PHY reset failed\n"); +} + +static bool rtl_tbi_enabled(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return (tp->mac_version == RTL_GIGA_MAC_VER_01) && + (RTL_R8(PHYstatus) & TBI_Enable); +} + +static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + rtl_hw_phy_config(dev); + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { + dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); + RTL_W8(0x82, 0x01); + } + + pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40); + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08); + + if (tp->mac_version == RTL_GIGA_MAC_VER_02) { + dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); + RTL_W8(0x82, 0x01); + dprintk("Set PHY Reg 0x0bh = 0x00h\n"); + rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0 + } + + rtl8169_phy_reset(dev, tp); + + rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL, + ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | + (tp->mii.supports_gmii ? + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full : 0)); + + if (rtl_tbi_enabled(tp)) + netif_info(tp, link, dev, "TBI auto-negotiating\n"); +} + +static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) +{ + void __iomem *ioaddr = tp->mmio_addr; + u32 high; + u32 low; + + low = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24); + high = addr[4] | (addr[5] << 8); + + spin_lock_irq(&tp->lock); + + RTL_W8(Cfg9346, Cfg9346_Unlock); + + RTL_W32(MAC4, high); + RTL_R32(MAC4); + + RTL_W32(MAC0, low); + RTL_R32(MAC0); + + if (tp->mac_version == RTL_GIGA_MAC_VER_34) { + const struct exgmac_reg e[] = { + { .addr = 0xe0, ERIAR_MASK_1111, .val = low }, + { .addr = 0xe4, ERIAR_MASK_1111, .val = high }, + { .addr = 0xf0, ERIAR_MASK_1111, .val = low << 16 }, + { .addr = 0xf4, ERIAR_MASK_1111, .val = high << 16 | + low >> 16 }, + }; + + rtl_write_exgmac_batch(ioaddr, e, ARRAY_SIZE(e)); + } + + RTL_W8(Cfg9346, Cfg9346_Lock); + + spin_unlock_irq(&tp->lock); +} + +static int rtl_set_mac_address(struct net_device *dev, void *p) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + rtl_rar_set(tp, dev->dev_addr); + + return 0; +} + +static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct mii_ioctl_data *data = if_mii(ifr); + + return netif_running(dev) ? 
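On RTL_GIGA_MAC_VER_34 the MAC address set above must also be mirrored through the indirect EXGMAC window, which is what the exgmac_reg table in rtl_rar_set describes. A plausible sketch of the batch writer, assuming it simply forwards each entry to the driver's rtl_eri_write accessor with the ERIAR_EXGMAC type:

        struct exgmac_reg {
                u16 addr;
                u16 mask;
                u32 val;
        };

        static void rtl_write_exgmac_batch(void __iomem *ioaddr,
                                           const struct exgmac_reg *r, int len)
        {
                /* One indirect write into the EXGMAC space per entry. */
                while (len-- > 0) {
                        rtl_eri_write(ioaddr, r->addr, r->mask, r->val,
                                      ERIAR_EXGMAC);
                        r++;
                }
        }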
tp->do_ioctl(tp, data, cmd) : -ENODEV; +} + +static int rtl_xmii_ioctl(struct rtl8169_private *tp, + struct mii_ioctl_data *data, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = 32; /* Internal PHY */ + return 0; + + case SIOCGMIIREG: + data->val_out = rtl_readphy(tp, data->reg_num & 0x1f); + return 0; + + case SIOCSMIIREG: + rtl_writephy(tp, data->reg_num & 0x1f, data->val_in); + return 0; + } + return -EOPNOTSUPP; +} + +static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd) +{ + return -EOPNOTSUPP; +} + +static const struct rtl_cfg_info { + void (*hw_start)(struct net_device *); + unsigned int region; + unsigned int align; + u16 intr_event; + u16 napi_event; + unsigned features; + u8 default_ver; +} rtl_cfg_infos [] = { + [RTL_CFG_0] = { + .hw_start = rtl_hw_start_8169, + .region = 1, + .align = 0, + .intr_event = SYSErr | LinkChg | RxOverflow | + RxFIFOOver | TxErr | TxOK | RxOK | RxErr, + .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow, + .features = RTL_FEATURE_GMII, + .default_ver = RTL_GIGA_MAC_VER_01, + }, + [RTL_CFG_1] = { + .hw_start = rtl_hw_start_8168, + .region = 2, + .align = 8, + .intr_event = SYSErr | LinkChg | RxOverflow | + TxErr | TxOK | RxOK | RxErr, + .napi_event = TxErr | TxOK | RxOK | RxOverflow, + .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI, + .default_ver = RTL_GIGA_MAC_VER_11, + }, + [RTL_CFG_2] = { + .hw_start = rtl_hw_start_8101, + .region = 2, + .align = 8, + .intr_event = SYSErr | LinkChg | RxOverflow | PCSTimeout | + RxFIFOOver | TxErr | TxOK | RxOK | RxErr, + .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow, + .features = RTL_FEATURE_MSI, + .default_ver = RTL_GIGA_MAC_VER_13, + } +}; + +/* Cfg9346_Unlock assumed. */ +static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr, + const struct rtl_cfg_info *cfg) +{ + unsigned msi = 0; + u8 cfg2; + + cfg2 = RTL_R8(Config2) & ~MSIEnable; + if (cfg->features & RTL_FEATURE_MSI) { + if (pci_enable_msi(pdev)) { + dev_info(&pdev->dev, "no MSI. 
Back to INTx.\n"); + } else { + cfg2 |= MSIEnable; + msi = RTL_FEATURE_MSI; + } + } + RTL_W8(Config2, cfg2); + return msi; +} + +static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp) +{ + if (tp->features & RTL_FEATURE_MSI) { + pci_disable_msi(pdev); + tp->features &= ~RTL_FEATURE_MSI; + } +} + +static const struct net_device_ops rtl8169_netdev_ops = { + .ndo_open = rtl8169_open, + .ndo_stop = rtl8169_close, + .ndo_get_stats = rtl8169_get_stats, + .ndo_start_xmit = rtl8169_start_xmit, + .ndo_tx_timeout = rtl8169_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = rtl8169_change_mtu, + .ndo_fix_features = rtl8169_fix_features, + .ndo_set_features = rtl8169_set_features, + .ndo_set_mac_address = rtl_set_mac_address, + .ndo_do_ioctl = rtl8169_ioctl, + .ndo_set_rx_mode = rtl_set_rx_mode, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = rtl8169_netpoll, +#endif + +}; + +static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp) +{ + struct mdio_ops *ops = &tp->mdio_ops; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + ops->write = r8168dp_1_mdio_write; + ops->read = r8168dp_1_mdio_read; + break; + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + ops->write = r8168dp_2_mdio_write; + ops->read = r8168dp_2_mdio_read; + break; + default: + ops->write = r8169_mdio_write; + ops->read = r8169_mdio_read; + break; + } +} + ++static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) ++{ ++ void __iomem *ioaddr = tp->mmio_addr; ++ ++ switch (tp->mac_version) { ++ case RTL_GIGA_MAC_VER_29: ++ case RTL_GIGA_MAC_VER_30: ++ case RTL_GIGA_MAC_VER_32: ++ case RTL_GIGA_MAC_VER_33: ++ case RTL_GIGA_MAC_VER_34: ++ RTL_W32(RxConfig, RTL_R32(RxConfig) | ++ AcceptBroadcast | AcceptMulticast | AcceptMyPhys); ++ break; ++ default: ++ break; ++ } ++} ++ ++static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) ++{ ++ if (!(__rtl8169_get_wol(tp) & WAKE_ANY)) ++ return false; ++ ++ rtl_writephy(tp, 0x1f, 0x0000); ++ rtl_writephy(tp, MII_BMCR, 0x0000); ++ ++ rtl_wol_suspend_quirk(tp); ++ ++ return true; ++} ++ +static void r810x_phy_power_down(struct rtl8169_private *tp) +{ + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, MII_BMCR, BMCR_PDOWN); +} + +static void r810x_phy_power_up(struct rtl8169_private *tp) +{ + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE); +} + +static void r810x_pll_power_down(struct rtl8169_private *tp) +{ - void __iomem *ioaddr = tp->mmio_addr; - - if (__rtl8169_get_wol(tp) & WAKE_ANY) { - rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, MII_BMCR, 0x0000); - - if (tp->mac_version == RTL_GIGA_MAC_VER_29 || - tp->mac_version == RTL_GIGA_MAC_VER_30) - RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast | - AcceptMulticast | AcceptMyPhys); ++ if (rtl_wol_pll_power_down(tp)) + return; - } + + r810x_phy_power_down(tp); +} + +static void r810x_pll_power_up(struct rtl8169_private *tp) +{ + r810x_phy_power_up(tp); +} + +static void r8168_phy_power_up(struct rtl8169_private *tp) +{ + rtl_writephy(tp, 0x1f, 0x0000); + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + case RTL_GIGA_MAC_VER_18: + case RTL_GIGA_MAC_VER_19: + case RTL_GIGA_MAC_VER_20: + case RTL_GIGA_MAC_VER_21: + case RTL_GIGA_MAC_VER_22: + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + rtl_writephy(tp, 
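The ++ hunks above are the substantive part of this merge: the Wake-on-LAN handling that was previously duplicated in r810x_pll_power_down() and r8168_pll_power_down() is hoisted into rtl_wol_pll_power_down(), which returns true when any wake source is armed so that callers skip the full PHY power-down (BMCR is cleared instead of being set to BMCR_PDOWN, leaving the PHY powered). rtl_wol_suspend_quirk() keeps the broadcast/multicast/my-phys accept bits set on the affected chips, presumably so the MAC still sees the frames that are supposed to wake it.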
0x0e, 0x0000); + break; + default: + break; + } + rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE); +} + +static void r8168_phy_power_down(struct rtl8169_private *tp) +{ + rtl_writephy(tp, 0x1f, 0x0000); + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN); + break; + + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + case RTL_GIGA_MAC_VER_18: + case RTL_GIGA_MAC_VER_19: + case RTL_GIGA_MAC_VER_20: + case RTL_GIGA_MAC_VER_21: + case RTL_GIGA_MAC_VER_22: + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + rtl_writephy(tp, 0x0e, 0x0200); + default: + rtl_writephy(tp, MII_BMCR, BMCR_PDOWN); + break; + } +} + +static void r8168_pll_power_down(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + if ((tp->mac_version == RTL_GIGA_MAC_VER_27 || + tp->mac_version == RTL_GIGA_MAC_VER_28 || + tp->mac_version == RTL_GIGA_MAC_VER_31) && + r8168dp_check_dash(tp)) { + return; + } + + if ((tp->mac_version == RTL_GIGA_MAC_VER_23 || + tp->mac_version == RTL_GIGA_MAC_VER_24) && + (RTL_R16(CPlusCmd) & ASF)) { + return; + } + + if (tp->mac_version == RTL_GIGA_MAC_VER_32 || + tp->mac_version == RTL_GIGA_MAC_VER_33) + rtl_ephy_write(ioaddr, 0x19, 0xff64); + - if (__rtl8169_get_wol(tp) & WAKE_ANY) { - rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, MII_BMCR, 0x0000); - - if (tp->mac_version == RTL_GIGA_MAC_VER_32 || - tp->mac_version == RTL_GIGA_MAC_VER_33 || - tp->mac_version == RTL_GIGA_MAC_VER_34) - RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast | - AcceptMulticast | AcceptMyPhys); ++ if (rtl_wol_pll_power_down(tp)) + return; - } + + r8168_phy_power_down(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); + break; + } +} + +static void r8168_pll_power_up(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + if ((tp->mac_version == RTL_GIGA_MAC_VER_27 || + tp->mac_version == RTL_GIGA_MAC_VER_28 || + tp->mac_version == RTL_GIGA_MAC_VER_31) && + r8168dp_check_dash(tp)) { + return; + } + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); + break; + } + + r8168_phy_power_up(tp); +} + +static void rtl_generic_op(struct rtl8169_private *tp, + void (*op)(struct rtl8169_private *)) +{ + if (op) + op(tp); +} + +static void rtl_pll_power_down(struct rtl8169_private *tp) +{ + rtl_generic_op(tp, tp->pll_power_ops.down); +} + +static void rtl_pll_power_up(struct rtl8169_private *tp) +{ + rtl_generic_op(tp, tp->pll_power_ops.up); +} + +static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp) +{ + struct pll_power_ops *ops = &tp->pll_power_ops; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_07: + case RTL_GIGA_MAC_VER_08: + case RTL_GIGA_MAC_VER_09: + case RTL_GIGA_MAC_VER_10: + case RTL_GIGA_MAC_VER_16: + case RTL_GIGA_MAC_VER_29: + case RTL_GIGA_MAC_VER_30: + ops->down = r810x_pll_power_down; + ops->up = r810x_pll_power_up; + break; + + case RTL_GIGA_MAC_VER_11: + case 
RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + case RTL_GIGA_MAC_VER_18: + case RTL_GIGA_MAC_VER_19: + case RTL_GIGA_MAC_VER_20: + case RTL_GIGA_MAC_VER_21: + case RTL_GIGA_MAC_VER_22: + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_35: + case RTL_GIGA_MAC_VER_36: + ops->down = r8168_pll_power_down; + ops->up = r8168_pll_power_up; + break; + + default: + ops->down = NULL; + ops->up = NULL; + break; + } +} + +static void rtl_init_rxcfg(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_01: + case RTL_GIGA_MAC_VER_02: + case RTL_GIGA_MAC_VER_03: + case RTL_GIGA_MAC_VER_04: + case RTL_GIGA_MAC_VER_05: + case RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_10: + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_13: + case RTL_GIGA_MAC_VER_14: + case RTL_GIGA_MAC_VER_15: + case RTL_GIGA_MAC_VER_16: + case RTL_GIGA_MAC_VER_17: + RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST); + break; + case RTL_GIGA_MAC_VER_18: + case RTL_GIGA_MAC_VER_19: + case RTL_GIGA_MAC_VER_20: + case RTL_GIGA_MAC_VER_21: + case RTL_GIGA_MAC_VER_22: + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); + break; + default: + RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST); + break; + } +} + +static void rtl8169_init_ring_indexes(struct rtl8169_private *tp) +{ + tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0; +} + +static void rtl_hw_jumbo_enable(struct rtl8169_private *tp) +{ + rtl_generic_op(tp, tp->jumbo_ops.enable); +} + +static void rtl_hw_jumbo_disable(struct rtl8169_private *tp) +{ + rtl_generic_op(tp, tp->jumbo_ops.disable); +} + +static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); + RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1); + rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT); +} + +static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0); + RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1); + rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT); +} + +static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); +} + +static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0); +} + +static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + RTL_W8(MaxTxPacketSize, 0x3f); + RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); + RTL_W8(Config4, RTL_R8(Config4) | 0x01); + pci_write_config_byte(pdev, 0x79, 0x20); +} + +static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + RTL_W8(MaxTxPacketSize, 0x0c); + RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0); + RTL_W8(Config4, RTL_R8(Config4) & ~0x01); + pci_write_config_byte(pdev, 0x79, 0x50); +} + +static void r8168b_0_hw_jumbo_enable(struct 
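The jumbo-frame helpers above and below keep re-programming the PCIe Max_Read_Request_Size: 0x2 << MAX_READ_REQUEST_SHIFT encodes a 512-byte MRRS, and 0x5 << MAX_READ_REQUEST_SHIFT the 4096-byte maximum. A sketch of rtl_tx_performance_tweak consistent with that usage, assuming MAX_READ_REQUEST_SHIFT is 12, the offset of the readrq field in the PCIe Device Control register:

        static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
        {
                int cap = pci_pcie_cap(pdev);

                if (cap) {
                        u16 ctl;

                        /* Replace the Max_Read_Request_Size field with
                         * 'force'; callers may also OR in the no-snoop
                         * enable bit. */
                        pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
                        ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force;
                        pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
                }
        }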
rtl8169_private *tp) +{ + rtl_tx_performance_tweak(tp->pci_dev, + (0x2 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN); +} + +static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp) +{ + rtl_tx_performance_tweak(tp->pci_dev, + (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN); +} + +static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + r8168b_0_hw_jumbo_enable(tp); + + RTL_W8(Config4, RTL_R8(Config4) | (1 << 0)); +} + +static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + r8168b_0_hw_jumbo_disable(tp); + + RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0)); +} + +static void __devinit rtl_init_jumbo_ops(struct rtl8169_private *tp) +{ + struct jumbo_ops *ops = &tp->jumbo_ops; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_11: + ops->disable = r8168b_0_hw_jumbo_disable; + ops->enable = r8168b_0_hw_jumbo_enable; + break; + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + ops->disable = r8168b_1_hw_jumbo_disable; + ops->enable = r8168b_1_hw_jumbo_enable; + break; + case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */ + case RTL_GIGA_MAC_VER_19: + case RTL_GIGA_MAC_VER_20: + case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */ + case RTL_GIGA_MAC_VER_22: + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + ops->disable = r8168c_hw_jumbo_disable; + ops->enable = r8168c_hw_jumbo_enable; + break; + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + ops->disable = r8168dp_hw_jumbo_disable; + ops->enable = r8168dp_hw_jumbo_enable; + break; + case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */ + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_34: + ops->disable = r8168e_hw_jumbo_disable; + ops->enable = r8168e_hw_jumbo_enable; + break; + + /* + * No action needed for jumbo frames with 8169. + * No jumbo for 810x at all. + */ + default: + ops->disable = NULL; + ops->enable = NULL; + break; + } +} + +static void rtl_hw_reset(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + int i; + + /* Soft reset the chip. */ + RTL_W8(ChipCmd, CmdReset); + + /* Check that the chip has finished the reset. 
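CmdReset is self-clearing; the loop below gives the chip roughly 10 ms (100 polls at 100 us) before carrying on regardless.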
*/ + for (i = 0; i < 100; i++) { + if ((RTL_R8(ChipCmd) & CmdReset) == 0) + break; + udelay(100); + } + + rtl8169_init_ring_indexes(tp); +} + +static int __devinit +rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; + const unsigned int region = cfg->region; + struct rtl8169_private *tp; + struct mii_if_info *mii; + struct net_device *dev; + void __iomem *ioaddr; + int chipset, i; + int rc; + + if (netif_msg_drv(&debug)) { + printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n", + MODULENAME, RTL8169_VERSION); + } + + dev = alloc_etherdev(sizeof (*tp)); + if (!dev) { + if (netif_msg_drv(&debug)) + dev_err(&pdev->dev, "unable to alloc new ethernet\n"); + rc = -ENOMEM; + goto out; + } + + SET_NETDEV_DEV(dev, &pdev->dev); + dev->netdev_ops = &rtl8169_netdev_ops; + tp = netdev_priv(dev); + tp->dev = dev; + tp->pci_dev = pdev; + tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); + + mii = &tp->mii; + mii->dev = dev; + mii->mdio_read = rtl_mdio_read; + mii->mdio_write = rtl_mdio_write; + mii->phy_id_mask = 0x1f; + mii->reg_num_mask = 0x1f; + mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII); + + /* disable ASPM completely as that cause random device stop working + * problems as well as full system hangs for some PCIe devices users */ + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | + PCIE_LINK_STATE_CLKPM); + + /* enable device (incl. PCI PM wakeup and hotplug setup) */ + rc = pci_enable_device(pdev); + if (rc < 0) { + netif_err(tp, probe, dev, "enable failure\n"); + goto err_out_free_dev_1; + } + + if (pci_set_mwi(pdev) < 0) + netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n"); + + /* make sure PCI base addr 1 is MMIO */ + if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) { + netif_err(tp, probe, dev, + "region #%d not an MMIO resource, aborting\n", + region); + rc = -ENODEV; + goto err_out_mwi_2; + } + + /* check for weird/broken PCI region reporting */ + if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) { + netif_err(tp, probe, dev, + "Invalid PCI region size(s), aborting\n"); + rc = -ENODEV; + goto err_out_mwi_2; + } + + rc = pci_request_regions(pdev, MODULENAME); + if (rc < 0) { + netif_err(tp, probe, dev, "could not request regions\n"); + goto err_out_mwi_2; + } + + tp->cp_cmd = RxChkSum; + + if ((sizeof(dma_addr_t) > 4) && + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { + tp->cp_cmd |= PCIDAC; + dev->features |= NETIF_F_HIGHDMA; + } else { + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc < 0) { + netif_err(tp, probe, dev, "DMA configuration failed\n"); + goto err_out_free_res_3; + } + } + + /* ioremap MMIO region */ + ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE); + if (!ioaddr) { + netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n"); + rc = -EIO; + goto err_out_free_res_3; + } + tp->mmio_addr = ioaddr; + + if (!pci_is_pcie(pdev)) + netif_info(tp, probe, dev, "not PCI Express\n"); + + /* Identify chip attached to board */ + rtl8169_get_mac_version(tp, dev, cfg->default_ver); + + rtl_init_rxcfg(tp); + + RTL_W16(IntrMask, 0x0000); + + rtl_hw_reset(tp); + + RTL_W16(IntrStatus, 0xffff); + + pci_set_master(pdev); + + /* + * Pretend we are using VLANs; This bypasses a nasty bug where + * Interrupts stop flowing on high load on 8110SCd controllers. 
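The same constraint is why hw_features below drops NETIF_F_HW_VLAN_RX for this chip: hardware VLAN stripping has to stay enabled and must not be user-toggleable.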
+ */ + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + tp->cp_cmd |= RxVlan; + + rtl_init_mdio_ops(tp); + rtl_init_pll_power_ops(tp); + rtl_init_jumbo_ops(tp); + + rtl8169_print_mac_version(tp); + + chipset = tp->mac_version; + tp->txd_version = rtl_chip_infos[chipset].txd_version; + + RTL_W8(Cfg9346, Cfg9346_Unlock); + RTL_W8(Config1, RTL_R8(Config1) | PMEnable); + RTL_W8(Config5, RTL_R8(Config5) & PMEStatus); + if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0) + tp->features |= RTL_FEATURE_WOL; + if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0) + tp->features |= RTL_FEATURE_WOL; + tp->features |= rtl_try_msi(pdev, ioaddr, cfg); + RTL_W8(Cfg9346, Cfg9346_Lock); + + if (rtl_tbi_enabled(tp)) { + tp->set_speed = rtl8169_set_speed_tbi; + tp->get_settings = rtl8169_gset_tbi; + tp->phy_reset_enable = rtl8169_tbi_reset_enable; + tp->phy_reset_pending = rtl8169_tbi_reset_pending; + tp->link_ok = rtl8169_tbi_link_ok; + tp->do_ioctl = rtl_tbi_ioctl; + } else { + tp->set_speed = rtl8169_set_speed_xmii; + tp->get_settings = rtl8169_gset_xmii; + tp->phy_reset_enable = rtl8169_xmii_reset_enable; + tp->phy_reset_pending = rtl8169_xmii_reset_pending; + tp->link_ok = rtl8169_xmii_link_ok; + tp->do_ioctl = rtl_xmii_ioctl; + } + + spin_lock_init(&tp->lock); + + /* Get MAC address */ + for (i = 0; i < MAC_ADDR_LEN; i++) + dev->dev_addr[i] = RTL_R8(MAC0 + i); + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); + + SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops); + dev->watchdog_timeo = RTL8169_TX_TIMEOUT; + dev->irq = pdev->irq; + dev->base_addr = (unsigned long) ioaddr; + + netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT); + + /* don't enable SG, IP_CSUM and TSO by default - it might not work + * properly for all devices */ + dev->features |= NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + + dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | + NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | + NETIF_F_HIGHDMA; + + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + /* 8110SCd requires hardware Rx VLAN - disallow toggling */ + dev->hw_features &= ~NETIF_F_HW_VLAN_RX; + + tp->intr_mask = 0xffff; + tp->hw_start = cfg->hw_start; + tp->intr_event = cfg->intr_event; + tp->napi_event = cfg->napi_event; + + tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ? + ~(RxBOVF | RxFOVF) : ~0; + + init_timer(&tp->timer); + tp->timer.data = (unsigned long) dev; + tp->timer.function = rtl8169_phy_timer; + + tp->rtl_fw = RTL_FIRMWARE_UNKNOWN; + + rc = register_netdev(dev); + if (rc < 0) + goto err_out_msi_4; + + pci_set_drvdata(pdev, dev); + + netif_info(tp, probe, dev, "%s at 0x%lx, %pM, XID %08x IRQ %d\n", + rtl_chip_infos[chipset].name, dev->base_addr, dev->dev_addr, + (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), dev->irq); + if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) { + netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, " + "tx checksumming: %s]\n", + rtl_chip_infos[chipset].jumbo_max, + rtl_chip_infos[chipset].jumbo_tx_csum ? 
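Everything that touches the link goes through the function pointers selected above, so the rest of the driver never needs to know whether the port is a copper xMII PHY or the original 8169's TBI fiber interface. As an illustration, the two link_ok callbacks plausibly reduce to single register tests; the PHYstatus/LinkStatus and TBICSR/TBILinkOk names used here are assumptions about this driver's register map:

        static int rtl8169_xmii_link_ok(void __iomem *ioaddr)
        {
                /* copper: link bit in the PHY status register */
                return RTL_R8(PHYstatus) & LinkStatus;
        }

        static int rtl8169_tbi_link_ok(void __iomem *ioaddr)
        {
                /* fiber: link-ok bit in the TBI control/status register */
                return RTL_R32(TBICSR) & TBILinkOk;
        }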
"ok" : "ko"); + } + + if (tp->mac_version == RTL_GIGA_MAC_VER_27 || + tp->mac_version == RTL_GIGA_MAC_VER_28 || + tp->mac_version == RTL_GIGA_MAC_VER_31) { + rtl8168_driver_start(tp); + } + + device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL); + + if (pci_dev_run_wake(pdev)) + pm_runtime_put_noidle(&pdev->dev); + + netif_carrier_off(dev); + +out: + return rc; + +err_out_msi_4: + rtl_disable_msi(pdev, tp); + iounmap(ioaddr); +err_out_free_res_3: + pci_release_regions(pdev); +err_out_mwi_2: + pci_clear_mwi(pdev); + pci_disable_device(pdev); +err_out_free_dev_1: + free_netdev(dev); + goto out; +} + +static void __devexit rtl8169_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + if (tp->mac_version == RTL_GIGA_MAC_VER_27 || + tp->mac_version == RTL_GIGA_MAC_VER_28 || + tp->mac_version == RTL_GIGA_MAC_VER_31) { + rtl8168_driver_stop(tp); + } + + cancel_delayed_work_sync(&tp->task); + + unregister_netdev(dev); + + rtl_release_firmware(tp); + + if (pci_dev_run_wake(pdev)) + pm_runtime_get_noresume(&pdev->dev); + + /* restore original MAC address */ + rtl_rar_set(tp, dev->perm_addr); + + rtl_disable_msi(pdev, tp); + rtl8169_release_board(pdev, dev, tp->mmio_addr); + pci_set_drvdata(pdev, NULL); +} + +static void rtl_request_uncached_firmware(struct rtl8169_private *tp) +{ + struct rtl_fw *rtl_fw; + const char *name; + int rc = -ENOMEM; + + name = rtl_lookup_firmware_name(tp); + if (!name) + goto out_no_firmware; + + rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL); + if (!rtl_fw) + goto err_warn; + + rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev); + if (rc < 0) + goto err_free; + + rc = rtl_check_firmware(tp, rtl_fw); + if (rc < 0) + goto err_release_firmware; + + tp->rtl_fw = rtl_fw; +out: + return; + +err_release_firmware: + release_firmware(rtl_fw->fw); +err_free: + kfree(rtl_fw); +err_warn: + netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n", + name, rc); +out_no_firmware: + tp->rtl_fw = NULL; + goto out; +} + +static void rtl_request_firmware(struct rtl8169_private *tp) +{ + if (IS_ERR(tp->rtl_fw)) + rtl_request_uncached_firmware(tp); +} + +static int rtl8169_open(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + int retval = -ENOMEM; + + pm_runtime_get_sync(&pdev->dev); + + /* + * Rx and Tx desscriptors needs 256 bytes alignment. + * dma_alloc_coherent provides more. + */ + tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES, + &tp->TxPhyAddr, GFP_KERNEL); + if (!tp->TxDescArray) + goto err_pm_runtime_put; + + tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES, + &tp->RxPhyAddr, GFP_KERNEL); + if (!tp->RxDescArray) + goto err_free_tx_0; + + retval = rtl8169_init_ring(dev); + if (retval < 0) + goto err_free_rx_1; + + INIT_DELAYED_WORK(&tp->task, NULL); + + smp_mb(); + + rtl_request_firmware(tp); + + retval = request_irq(dev->irq, rtl8169_interrupt, + (tp->features & RTL_FEATURE_MSI) ? 
0 : IRQF_SHARED, + dev->name, dev); + if (retval < 0) + goto err_release_fw_2; + + napi_enable(&tp->napi); + + rtl8169_init_phy(dev, tp); + + rtl8169_set_features(dev, dev->features); + + rtl_pll_power_up(tp); + + rtl_hw_start(dev); + + tp->saved_wolopts = 0; + pm_runtime_put_noidle(&pdev->dev); + + rtl8169_check_link_status(dev, tp, ioaddr); +out: + return retval; + +err_release_fw_2: + rtl_release_firmware(tp); + rtl8169_rx_clear(tp); +err_free_rx_1: + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + tp->RxDescArray = NULL; +err_free_tx_0: + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); + tp->TxDescArray = NULL; +err_pm_runtime_put: + pm_runtime_put_noidle(&pdev->dev); + goto out; +} + +static void rtl_rx_close(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK); +} + +static void rtl8169_hw_reset(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + /* Disable interrupts */ + rtl8169_irq_mask_and_ack(ioaddr); + + rtl_rx_close(tp); + + if (tp->mac_version == RTL_GIGA_MAC_VER_27 || + tp->mac_version == RTL_GIGA_MAC_VER_28 || + tp->mac_version == RTL_GIGA_MAC_VER_31) { + while (RTL_R8(TxPoll) & NPQ) + udelay(20); + } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 || + tp->mac_version == RTL_GIGA_MAC_VER_35 || + tp->mac_version == RTL_GIGA_MAC_VER_36) { + RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq); + while (!(RTL_R32(TxConfig) & TXCFG_EMPTY)) + udelay(100); + } else { + RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq); + udelay(100); + } + + rtl_hw_reset(tp); +} + +static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + /* Set DMA burst size and Interframe Gap Time */ + RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) | + (InterFrameGap << TxInterFrameGapShift)); +} + +static void rtl_hw_start(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + tp->hw_start(dev); + + netif_start_queue(dev); +} + +static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp, + void __iomem *ioaddr) +{ + /* + * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh + * register to be written before TxDescAddrLow to work. + * Switching from MMIO to I/O access fixes the issue as well. + */ + RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32); + RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32)); + RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32); + RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32)); +} + +static u16 rtl_rw_cpluscmd(void __iomem *ioaddr) +{ + u16 cmd; + + cmd = RTL_R16(CPlusCmd); + RTL_W16(CPlusCmd, cmd); + return cmd; +} + +static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz) +{ + /* Low hurts. Let's disable the filtering. 
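Writing rx_buf_sz + 1 below keeps the hardware size check from ever rejecting a frame that fits a receive buffer.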
*/ + RTL_W16(RxMaxSize, rx_buf_sz + 1); +} + +static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version) +{ + static const struct rtl_cfg2_info { + u32 mac_version; + u32 clk; + u32 val; + } cfg2_info [] = { + { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd + { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff }, + { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe + { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff } + }; + const struct rtl_cfg2_info *p = cfg2_info; + unsigned int i; + u32 clk; + + clk = RTL_R8(Config2) & PCI_Clock_66MHz; + for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) { + if ((p->mac_version == mac_version) && (p->clk == clk)) { + RTL_W32(0x7c, p->val); + break; + } + } +} + +static void rtl_hw_start_8169(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + if (tp->mac_version == RTL_GIGA_MAC_VER_05) { + RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW); + pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08); + } + + RTL_W8(Cfg9346, Cfg9346_Unlock); + if (tp->mac_version == RTL_GIGA_MAC_VER_01 || + tp->mac_version == RTL_GIGA_MAC_VER_02 || + tp->mac_version == RTL_GIGA_MAC_VER_03 || + tp->mac_version == RTL_GIGA_MAC_VER_04) + RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); + + rtl_init_rxcfg(tp); + + RTL_W8(EarlyTxThres, NoEarlyTx); + + rtl_set_rx_max_size(ioaddr, rx_buf_sz); + + if (tp->mac_version == RTL_GIGA_MAC_VER_01 || + tp->mac_version == RTL_GIGA_MAC_VER_02 || + tp->mac_version == RTL_GIGA_MAC_VER_03 || + tp->mac_version == RTL_GIGA_MAC_VER_04) + rtl_set_rx_tx_config_registers(tp); + + tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW; + + if (tp->mac_version == RTL_GIGA_MAC_VER_02 || + tp->mac_version == RTL_GIGA_MAC_VER_03) { + dprintk("Set MAC Reg C+CR Offset 0xE0. " + "Bit-3 and bit-14 MUST be 1\n"); + tp->cp_cmd |= (1 << 14); + } + + RTL_W16(CPlusCmd, tp->cp_cmd); + + rtl8169_set_magic_reg(ioaddr, tp->mac_version); + + /* + * Undocumented corner. Supposedly: + * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets + */ + RTL_W16(IntrMitigate, 0x0000); + + rtl_set_rx_tx_desc_registers(tp, ioaddr); + + if (tp->mac_version != RTL_GIGA_MAC_VER_01 && + tp->mac_version != RTL_GIGA_MAC_VER_02 && + tp->mac_version != RTL_GIGA_MAC_VER_03 && + tp->mac_version != RTL_GIGA_MAC_VER_04) { + RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); + rtl_set_rx_tx_config_registers(tp); + } + + RTL_W8(Cfg9346, Cfg9346_Lock); + + /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ + RTL_R8(IntrMask); + + RTL_W32(RxMissed, 0); + + rtl_set_rx_mode(dev); + + /* no early-rx interrupts */ + RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000); + + /* Enable all known interrupts by setting the interrupt mask. 
*/ + RTL_W16(IntrMask, tp->intr_event); +} + +static void rtl_csi_access_enable(void __iomem *ioaddr, u32 bits) +{ + u32 csi; + + csi = rtl_csi_read(ioaddr, 0x070c) & 0x00ffffff; + rtl_csi_write(ioaddr, 0x070c, csi | bits); +} + +static void rtl_csi_access_enable_1(void __iomem *ioaddr) +{ + rtl_csi_access_enable(ioaddr, 0x17000000); +} + +static void rtl_csi_access_enable_2(void __iomem *ioaddr) +{ + rtl_csi_access_enable(ioaddr, 0x27000000); +} + +struct ephy_info { + unsigned int offset; + u16 mask; + u16 bits; +}; + +static void rtl_ephy_init(void __iomem *ioaddr, const struct ephy_info *e, int len) +{ + u16 w; + + while (len-- > 0) { + w = (rtl_ephy_read(ioaddr, e->offset) & ~e->mask) | e->bits; + rtl_ephy_write(ioaddr, e->offset, w); + e++; + } +} + +static void rtl_disable_clock_request(struct pci_dev *pdev) +{ + int cap = pci_pcie_cap(pdev); + + if (cap) { + u16 ctl; + + pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl); + ctl &= ~PCI_EXP_LNKCTL_CLKREQ_EN; + pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl); + } +} + +static void rtl_enable_clock_request(struct pci_dev *pdev) +{ + int cap = pci_pcie_cap(pdev); + + if (cap) { + u16 ctl; + + pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl); + ctl |= PCI_EXP_LNKCTL_CLKREQ_EN; + pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl); + } +} + +#define R8168_CPCMD_QUIRK_MASK (\ + EnableBist | \ + Mac_dbgo_oe | \ + Force_half_dup | \ + Force_rxflow_en | \ + Force_txflow_en | \ + Cxpl_dbg_sel | \ + ASF | \ + PktCntrDisable | \ + Mac_dbgo_sel) + +static void rtl_hw_start_8168bb(void __iomem *ioaddr, struct pci_dev *pdev) +{ + RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); + + RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); + + rtl_tx_performance_tweak(pdev, + (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN); +} + +static void rtl_hw_start_8168bef(void __iomem *ioaddr, struct pci_dev *pdev) +{ + rtl_hw_start_8168bb(ioaddr, pdev); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0)); +} + +static void __rtl_hw_start_8168cp(void __iomem *ioaddr, struct pci_dev *pdev) +{ + RTL_W8(Config1, RTL_R8(Config1) | Speed_down); + + RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + rtl_disable_clock_request(pdev); + + RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); +} + +static void rtl_hw_start_8168cp_1(void __iomem *ioaddr, struct pci_dev *pdev) +{ + static const struct ephy_info e_info_8168cp[] = { + { 0x01, 0, 0x0001 }, + { 0x02, 0x0800, 0x1000 }, + { 0x03, 0, 0x0042 }, + { 0x06, 0x0080, 0x0000 }, + { 0x07, 0, 0x2000 } + }; + + rtl_csi_access_enable_2(ioaddr); + + rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp)); + + __rtl_hw_start_8168cp(ioaddr, pdev); +} + +static void rtl_hw_start_8168cp_2(void __iomem *ioaddr, struct pci_dev *pdev) +{ + rtl_csi_access_enable_2(ioaddr); + + RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); +} + +static void rtl_hw_start_8168cp_3(void __iomem *ioaddr, struct pci_dev *pdev) +{ + rtl_csi_access_enable_2(ioaddr); + + RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); + + /* Magic. 
*/ + RTL_W8(DBG_REG, 0x20); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); +} + +static void rtl_hw_start_8168c_1(void __iomem *ioaddr, struct pci_dev *pdev) +{ + static const struct ephy_info e_info_8168c_1[] = { + { 0x02, 0x0800, 0x1000 }, + { 0x03, 0, 0x0002 }, + { 0x06, 0x0080, 0x0000 } + }; + + rtl_csi_access_enable_2(ioaddr); + + RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2); + + rtl_ephy_init(ioaddr, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1)); + + __rtl_hw_start_8168cp(ioaddr, pdev); +} + +static void rtl_hw_start_8168c_2(void __iomem *ioaddr, struct pci_dev *pdev) +{ + static const struct ephy_info e_info_8168c_2[] = { + { 0x01, 0, 0x0001 }, + { 0x03, 0x0400, 0x0220 } + }; + + rtl_csi_access_enable_2(ioaddr); + + rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2)); + + __rtl_hw_start_8168cp(ioaddr, pdev); +} + +static void rtl_hw_start_8168c_3(void __iomem *ioaddr, struct pci_dev *pdev) +{ + rtl_hw_start_8168c_2(ioaddr, pdev); +} + +static void rtl_hw_start_8168c_4(void __iomem *ioaddr, struct pci_dev *pdev) +{ + rtl_csi_access_enable_2(ioaddr); + + __rtl_hw_start_8168cp(ioaddr, pdev); +} + +static void rtl_hw_start_8168d(void __iomem *ioaddr, struct pci_dev *pdev) +{ + rtl_csi_access_enable_2(ioaddr); + + rtl_disable_clock_request(pdev); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); +} + +static void rtl_hw_start_8168dp(void __iomem *ioaddr, struct pci_dev *pdev) +{ + rtl_csi_access_enable_1(ioaddr); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + rtl_disable_clock_request(pdev); +} + +static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev) +{ + static const struct ephy_info e_info_8168d_4[] = { + { 0x0b, ~0, 0x48 }, + { 0x19, 0x20, 0x50 }, + { 0x0c, ~0, 0x20 } + }; + int i; + + rtl_csi_access_enable_1(ioaddr); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) { + const struct ephy_info *e = e_info_8168d_4 + i; + u16 w; + + w = rtl_ephy_read(ioaddr, e->offset); + rtl_ephy_write(ioaddr, 0x03, (w & e->mask) | e->bits); + } + + rtl_enable_clock_request(pdev); +} + +static void rtl_hw_start_8168e_1(void __iomem *ioaddr, struct pci_dev *pdev) +{ + static const struct ephy_info e_info_8168e_1[] = { + { 0x00, 0x0200, 0x0100 }, + { 0x00, 0x0000, 0x0004 }, + { 0x06, 0x0002, 0x0001 }, + { 0x06, 0x0000, 0x0030 }, + { 0x07, 0x0000, 0x2000 }, + { 0x00, 0x0000, 0x0020 }, + { 0x03, 0x5800, 0x2000 }, + { 0x03, 0x0000, 0x0001 }, + { 0x01, 0x0800, 0x1000 }, + { 0x07, 0x0000, 0x4000 }, + { 0x1e, 0x0000, 0x2000 }, + { 0x19, 0xffff, 0xfe6c }, + { 0x0a, 0x0000, 0x0040 } + }; + + rtl_csi_access_enable_2(ioaddr); + + rtl_ephy_init(ioaddr, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1)); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + rtl_disable_clock_request(pdev); + + /* Reset tx FIFO pointer */ + RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST); + RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST); + + RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); +} + +static void rtl_hw_start_8168e_2(void __iomem *ioaddr, struct pci_dev *pdev) +{ + static const struct ephy_info e_info_8168e_2[] = { + { 0x09, 
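The 8168e/8168f bring-up below leans on yet another indirect register space, the ERI window behind the ERIAR/ERIDR pair, where the mask argument selects byte lanes and the type selects the target space. A sketch of rtl_eri_write consistent with its use here, following the flag-polling pattern this driver uses for all of its indirect accessors:

        static void rtl_eri_write(void __iomem *ioaddr, int addr, u32 mask,
                                  u32 val, int type)
        {
                unsigned int i;

                RTL_W32(ERIDR, val);
                RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);

                /* ERIAR_FLAG clears once the write has been committed. */
                for (i = 0; i < 100; i++) {
                        if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
                                break;
                        udelay(100);
                }
        }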
0x0000, 0x0080 }, + { 0x19, 0x0000, 0x0224 } + }; + + rtl_csi_access_enable_1(ioaddr); + + rtl_ephy_init(ioaddr, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2)); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC); + rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); + rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, + ERIAR_EXGMAC); + + RTL_W8(MaxTxPacketSize, EarlySize); + + rtl_disable_clock_request(pdev); + + RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); + RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); + + /* Adjust EEE LED frequency */ + RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); + + RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); + RTL_W32(MISC, RTL_R32(MISC) | PWM_EN); + RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); +} + +static void rtl_hw_start_8168f_1(void __iomem *ioaddr, struct pci_dev *pdev) +{ + static const struct ephy_info e_info_8168f_1[] = { + { 0x06, 0x00c0, 0x0020 }, + { 0x08, 0x0001, 0x0002 }, + { 0x09, 0x0000, 0x0080 }, + { 0x19, 0x0000, 0x0224 } + }; + + rtl_csi_access_enable_1(ioaddr); + + rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); + rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); + rtl_w1w0_eri(ioaddr, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); + rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC); + rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, + ERIAR_EXGMAC); + + RTL_W8(MaxTxPacketSize, EarlySize); + + rtl_disable_clock_request(pdev); + + RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); + RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); + + /* Adjust EEE LED frequency */ + RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); + + RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); + RTL_W32(MISC, RTL_R32(MISC) | PWM_EN); + RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); +} + +static void rtl_hw_start_8168(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + RTL_W8(Cfg9346, Cfg9346_Unlock); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + rtl_set_rx_max_size(ioaddr, rx_buf_sz); + + tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1; + + RTL_W16(CPlusCmd, tp->cp_cmd); + + RTL_W16(IntrMitigate, 0x5151); + + /* Work around for RxFIFO overflow. 
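On RTL_GIGA_MAC_VER_11 and _22 the RxFIFOOver and PCSTimeout events are watched instead of RxOverflow, which is what the interrupt-mask adjustment below implements.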
*/ + if (tp->mac_version == RTL_GIGA_MAC_VER_11 || + tp->mac_version == RTL_GIGA_MAC_VER_22) { + tp->intr_event |= RxFIFOOver | PCSTimeout; + tp->intr_event &= ~RxOverflow; + } + + rtl_set_rx_tx_desc_registers(tp, ioaddr); + + rtl_set_rx_mode(dev); + + RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) | + (InterFrameGap << TxInterFrameGapShift)); + + RTL_R8(IntrMask); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_11: + rtl_hw_start_8168bb(ioaddr, pdev); + break; + + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + rtl_hw_start_8168bef(ioaddr, pdev); + break; + + case RTL_GIGA_MAC_VER_18: + rtl_hw_start_8168cp_1(ioaddr, pdev); + break; + + case RTL_GIGA_MAC_VER_19: + rtl_hw_start_8168c_1(ioaddr, pdev); + break; + + case RTL_GIGA_MAC_VER_20: + rtl_hw_start_8168c_2(ioaddr, pdev); + break; + + case RTL_GIGA_MAC_VER_21: + rtl_hw_start_8168c_3(ioaddr, pdev); + break; + + case RTL_GIGA_MAC_VER_22: + rtl_hw_start_8168c_4(ioaddr, pdev); + break; + + case RTL_GIGA_MAC_VER_23: + rtl_hw_start_8168cp_2(ioaddr, pdev); + break; + + case RTL_GIGA_MAC_VER_24: + rtl_hw_start_8168cp_3(ioaddr, pdev); + break; + + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + rtl_hw_start_8168d(ioaddr, pdev); + break; + + case RTL_GIGA_MAC_VER_28: + rtl_hw_start_8168d_4(ioaddr, pdev); + break; + + case RTL_GIGA_MAC_VER_31: + rtl_hw_start_8168dp(ioaddr, pdev); + break; + + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + rtl_hw_start_8168e_1(ioaddr, pdev); + break; + case RTL_GIGA_MAC_VER_34: + rtl_hw_start_8168e_2(ioaddr, pdev); + break; + + case RTL_GIGA_MAC_VER_35: + case RTL_GIGA_MAC_VER_36: + rtl_hw_start_8168f_1(ioaddr, pdev); + break; + + default: + printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n", + dev->name, tp->mac_version); + break; + } + + RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); + + RTL_W8(Cfg9346, Cfg9346_Lock); + + RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000); + + RTL_W16(IntrMask, tp->intr_event); +} + +#define R810X_CPCMD_QUIRK_MASK (\ + EnableBist | \ + Mac_dbgo_oe | \ + Force_half_dup | \ + Force_rxflow_en | \ + Force_txflow_en | \ + Cxpl_dbg_sel | \ + ASF | \ + PktCntrDisable | \ + Mac_dbgo_sel) + +static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev) +{ + static const struct ephy_info e_info_8102e_1[] = { + { 0x01, 0, 0x6e65 }, + { 0x02, 0, 0x091f }, + { 0x03, 0, 0xc2f9 }, + { 0x06, 0, 0xafb5 }, + { 0x07, 0, 0x0e00 }, + { 0x19, 0, 0xec80 }, + { 0x01, 0, 0x2e65 }, + { 0x01, 0, 0x6e65 } + }; + u8 cfg1; + + rtl_csi_access_enable_2(ioaddr); + + RTL_W8(DBG_REG, FIX_NAK_1); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W8(Config1, + LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable); + RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); + + cfg1 = RTL_R8(Config1); + if ((cfg1 & LEDS0) && (cfg1 & LEDS1)) + RTL_W8(Config1, cfg1 & ~LEDS0); + + rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1)); +} + +static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev) +{ + rtl_csi_access_enable_2(ioaddr); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable); + RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); +} + +static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev) +{ + rtl_hw_start_8102e_2(ioaddr, pdev); + + rtl_ephy_write(ioaddr, 0x03, 0xc2f9); +} + +static void rtl_hw_start_8105e_1(void __iomem *ioaddr, struct pci_dev *pdev) +{ + static const struct ephy_info 
e_info_8105e_1[] = { + { 0x07, 0, 0x4000 }, + { 0x19, 0, 0x0200 }, + { 0x19, 0, 0x0020 }, + { 0x1e, 0, 0x2000 }, + { 0x03, 0, 0x0001 }, + { 0x19, 0, 0x0100 }, + { 0x19, 0, 0x0004 }, + { 0x0a, 0, 0x0020 } + }; + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800); + + /* Disable Early Tally Counter */ + RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000); + + RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET); + RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); + + rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1)); +} + +static void rtl_hw_start_8105e_2(void __iomem *ioaddr, struct pci_dev *pdev) +{ + rtl_hw_start_8105e_1(ioaddr, pdev); + rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000); +} + +static void rtl_hw_start_8101(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + if (tp->mac_version == RTL_GIGA_MAC_VER_13 || + tp->mac_version == RTL_GIGA_MAC_VER_16) { + int cap = pci_pcie_cap(pdev); + + if (cap) { + pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_NOSNOOP_EN); + } + } + + RTL_W8(Cfg9346, Cfg9346_Unlock); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_07: + rtl_hw_start_8102e_1(ioaddr, pdev); + break; + + case RTL_GIGA_MAC_VER_08: + rtl_hw_start_8102e_3(ioaddr, pdev); + break; + + case RTL_GIGA_MAC_VER_09: + rtl_hw_start_8102e_2(ioaddr, pdev); + break; + + case RTL_GIGA_MAC_VER_29: + rtl_hw_start_8105e_1(ioaddr, pdev); + break; + case RTL_GIGA_MAC_VER_30: + rtl_hw_start_8105e_2(ioaddr, pdev); + break; + } + + RTL_W8(Cfg9346, Cfg9346_Lock); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + rtl_set_rx_max_size(ioaddr, rx_buf_sz); + + tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK; + RTL_W16(CPlusCmd, tp->cp_cmd); + + RTL_W16(IntrMitigate, 0x0000); + + rtl_set_rx_tx_desc_registers(tp, ioaddr); + + RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); + rtl_set_rx_tx_config_registers(tp); + + RTL_R8(IntrMask); + + rtl_set_rx_mode(dev); + + RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); + + RTL_W16(IntrMask, tp->intr_event); +} + +static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (new_mtu < ETH_ZLEN || + new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max) + return -EINVAL; + + if (new_mtu > ETH_DATA_LEN) + rtl_hw_jumbo_enable(tp); + else + rtl_hw_jumbo_disable(tp); + + dev->mtu = new_mtu; + netdev_update_features(dev); + + return 0; +} + +static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc) +{ + desc->addr = cpu_to_le64(0x0badbadbadbadbadull); + desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask); +} + +static void rtl8169_free_rx_databuff(struct rtl8169_private *tp, + void **data_buff, struct RxDesc *desc) +{ + dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz, + DMA_FROM_DEVICE); + + kfree(*data_buff); + *data_buff = NULL; + rtl8169_make_unusable_by_asic(desc); +} + +static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz) +{ + u32 eor = le32_to_cpu(desc->opts1) & RingEnd; + + desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz); +} + +static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping, + u32 rx_buf_sz) +{ + desc->addr = cpu_to_le64(mapping); + wmb(); + rtl8169_mark_to_asic(desc, rx_buf_sz); +} + +static inline void *rtl8169_align(void *data) +{ + return (void *)ALIGN((long)data, 16); +} + +static struct sk_buff *rtl8169_alloc_rx_data(struct 
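The allocator that follows guarantees 16-byte alignment for receive buffers without always paying for it: a plain kmalloc_node() result is used as-is when it already happens to be aligned, and only the unaligned case reallocates with 15 bytes of slack so rtl8169_align() can round the pointer up. For example, if the retry allocation lands at an address ending in 0x1004, the buffer is DMA-mapped at ALIGN(0x1004, 16) = 0x1010, and the extra 15 bytes guarantee the aligned pointer still covers rx_buf_sz usable bytes.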
rtl8169_private *tp, + struct RxDesc *desc) +{ + void *data; + dma_addr_t mapping; + struct device *d = &tp->pci_dev->dev; + struct net_device *dev = tp->dev; + int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1; + + data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node); + if (!data) + return NULL; + + if (rtl8169_align(data) != data) { + kfree(data); + data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node); + if (!data) + return NULL; + } + + mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(d, mapping))) { + if (net_ratelimit()) + netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n"); + goto err_out; + } + + rtl8169_map_to_asic(desc, mapping, rx_buf_sz); + return data; + +err_out: + kfree(data); + return NULL; +} + +static void rtl8169_rx_clear(struct rtl8169_private *tp) +{ + unsigned int i; + + for (i = 0; i < NUM_RX_DESC; i++) { + if (tp->Rx_databuff[i]) { + rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i, + tp->RxDescArray + i); + } + } +} + +static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc) +{ + desc->opts1 |= cpu_to_le32(RingEnd); +} + +static int rtl8169_rx_fill(struct rtl8169_private *tp) +{ + unsigned int i; + + for (i = 0; i < NUM_RX_DESC; i++) { + void *data; + + if (tp->Rx_databuff[i]) + continue; + + data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i); + if (!data) { + rtl8169_make_unusable_by_asic(tp->RxDescArray + i); + goto err_out; + } + tp->Rx_databuff[i] = data; + } + + rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1); + return 0; + +err_out: + rtl8169_rx_clear(tp); + return -ENOMEM; +} + +static int rtl8169_init_ring(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_init_ring_indexes(tp); + + memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info)); + memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *)); + + return rtl8169_rx_fill(tp); +} + +static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb, + struct TxDesc *desc) +{ + unsigned int len = tx_skb->len; + + dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE); + + desc->opts1 = 0x00; + desc->opts2 = 0x00; + desc->addr = 0x00; + tx_skb->len = 0; +} + +static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start, + unsigned int n) +{ + unsigned int i; + + for (i = 0; i < n; i++) { + unsigned int entry = (start + i) % NUM_TX_DESC; + struct ring_info *tx_skb = tp->tx_skb + entry; + unsigned int len = tx_skb->len; + + if (len) { + struct sk_buff *skb = tx_skb->skb; + + rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb, + tp->TxDescArray + entry); + if (skb) { + tp->dev->stats.tx_dropped++; + dev_kfree_skb(skb); + tx_skb->skb = NULL; + } + } + } +} + +static void rtl8169_tx_clear(struct rtl8169_private *tp) +{ + rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC); + tp->cur_tx = tp->dirty_tx = 0; +} + +static void rtl8169_schedule_work(struct net_device *dev, work_func_t task) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + PREPARE_DELAYED_WORK(&tp->task, task); + schedule_delayed_work(&tp->task, 4); +} + +static void rtl8169_wait_for_quiescence(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + + synchronize_irq(dev->irq); + + /* Wait for any pending NAPI task to complete */ + napi_disable(&tp->napi); + + rtl8169_irq_mask_and_ack(ioaddr); + + tp->intr_mask = 0xffff; + RTL_W16(IntrMask, tp->intr_event); + napi_enable(&tp->napi); +} + +static 
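Both task functions that follow are scheduled through rtl8169_schedule_work() above, which re-points the single shared tp->task delayed_work at the requested handler via PREPARE_DELAYED_WORK() and queues it with a 4-jiffy delay, so only one recovery action can be pending at any time.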
void rtl8169_reinit_task(struct work_struct *work) +{ + struct rtl8169_private *tp = + container_of(work, struct rtl8169_private, task.work); + struct net_device *dev = tp->dev; + int ret; + + rtnl_lock(); + + if (!netif_running(dev)) + goto out_unlock; + + rtl8169_wait_for_quiescence(dev); + rtl8169_close(dev); + + ret = rtl8169_open(dev); + if (unlikely(ret < 0)) { + if (net_ratelimit()) + netif_err(tp, drv, dev, + "reinit failure (status = %d). Rescheduling\n", + ret); + rtl8169_schedule_work(dev, rtl8169_reinit_task); + } + +out_unlock: + rtnl_unlock(); +} + +static void rtl8169_reset_task(struct work_struct *work) +{ + struct rtl8169_private *tp = + container_of(work, struct rtl8169_private, task.work); + struct net_device *dev = tp->dev; + int i; + + rtnl_lock(); + + if (!netif_running(dev)) + goto out_unlock; + + rtl8169_wait_for_quiescence(dev); + + for (i = 0; i < NUM_RX_DESC; i++) + rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz); + + rtl8169_tx_clear(tp); + + rtl8169_hw_reset(tp); + rtl_hw_start(dev); + netif_wake_queue(dev); + rtl8169_check_link_status(dev, tp, tp->mmio_addr); + +out_unlock: + rtnl_unlock(); +} + +static void rtl8169_tx_timeout(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_hw_reset(tp); + + /* Let's wait a bit while any (async) irq lands on */ + rtl8169_schedule_work(dev, rtl8169_reset_task); +} + +static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb, + u32 *opts) +{ + struct skb_shared_info *info = skb_shinfo(skb); + unsigned int cur_frag, entry; + struct TxDesc * uninitialized_var(txd); + struct device *d = &tp->pci_dev->dev; + + entry = tp->cur_tx; + for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) { + const skb_frag_t *frag = info->frags + cur_frag; + dma_addr_t mapping; + u32 status, len; + void *addr; + + entry = (entry + 1) % NUM_TX_DESC; + + txd = tp->TxDescArray + entry; + len = skb_frag_size(frag); + addr = skb_frag_address(frag); + mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(d, mapping))) { + if (net_ratelimit()) + netif_err(tp, drv, tp->dev, + "Failed to map TX fragments DMA!\n"); + goto err_out; + } + + /* Anti gcc 2.95.3 bugware (sic) */ + status = opts[0] | len | + (RingEnd * !((entry + 1) % NUM_TX_DESC)); + + txd->opts1 = cpu_to_le32(status); + txd->opts2 = cpu_to_le32(opts[1]); + txd->addr = cpu_to_le64(mapping); + + tp->tx_skb[entry].len = len; + } + + if (cur_frag) { + tp->tx_skb[entry].skb = skb; + txd->opts1 |= cpu_to_le32(LastFrag); + } + + return cur_frag; + +err_out: + rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag); + return -EIO; +} + +static inline void rtl8169_tso_csum(struct rtl8169_private *tp, + struct sk_buff *skb, u32 *opts) +{ + const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version; + u32 mss = skb_shinfo(skb)->gso_size; + int offset = info->opts_offset; + + if (mss) { + opts[0] |= TD_LSO; + opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + const struct iphdr *ip = ip_hdr(skb); + + if (ip->protocol == IPPROTO_TCP) + opts[offset] |= info->checksum.tcp; + else if (ip->protocol == IPPROTO_UDP) + opts[offset] |= info->checksum.udp; + else + WARN_ON_ONCE(1); + } +} + +static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + unsigned int entry = tp->cur_tx % NUM_TX_DESC; + struct TxDesc *txd = tp->TxDescArray + entry; + void __iomem *ioaddr = 
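The transmit path below never masks cur_tx and dirty_tx themselves: they are free-running counters, only entry = idx % NUM_TX_DESC converts them to ring slots, and cur_tx - dirty_tx stays the correct in-flight count even across u32 wraparound. TX_BUFFS_AVAIL, which drives the queue stop/wake decisions, is then presumably the complementary expression with one slot held in reserve:

        /* Free TX slots; one descriptor is kept back so a full ring is
         * distinguishable from an empty one. */
        #define TX_BUFFS_AVAIL(tp) \
                (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)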
tp->mmio_addr; + struct device *d = &tp->pci_dev->dev; + dma_addr_t mapping; + u32 status, len; + u32 opts[2]; + int frags; + + if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) { + netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n"); + goto err_stop_0; + } + + if (unlikely(le32_to_cpu(txd->opts1) & DescOwn)) + goto err_stop_0; + + len = skb_headlen(skb); + mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(d, mapping))) { + if (net_ratelimit()) + netif_err(tp, drv, dev, "Failed to map TX DMA!\n"); + goto err_dma_0; + } + + tp->tx_skb[entry].len = len; + txd->addr = cpu_to_le64(mapping); + + opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb)); + opts[0] = DescOwn; + + rtl8169_tso_csum(tp, skb, opts); + + frags = rtl8169_xmit_frags(tp, skb, opts); + if (frags < 0) + goto err_dma_1; + else if (frags) + opts[0] |= FirstFrag; + else { + opts[0] |= FirstFrag | LastFrag; + tp->tx_skb[entry].skb = skb; + } + + txd->opts2 = cpu_to_le32(opts[1]); + + wmb(); + + /* Anti gcc 2.95.3 bugware (sic) */ + status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC)); + txd->opts1 = cpu_to_le32(status); + + tp->cur_tx += frags + 1; + + wmb(); + + RTL_W8(TxPoll, NPQ); + + if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) { + netif_stop_queue(dev); + smp_rmb(); + if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS) + netif_wake_queue(dev); + } + + return NETDEV_TX_OK; + +err_dma_1: + rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd); +err_dma_0: + dev_kfree_skb(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + +err_stop_0: + netif_stop_queue(dev); + dev->stats.tx_dropped++; + return NETDEV_TX_BUSY; +} + +static void rtl8169_pcierr_interrupt(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + u16 pci_status, pci_cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); + pci_read_config_word(pdev, PCI_STATUS, &pci_status); + + netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n", + pci_cmd, pci_status); + + /* + * The recovery sequence below admits a very elaborated explanation: + * - it seems to work; + * - I did not see what else could be done; + * - it makes iop3xx happy. + * + * Feel free to adjust to your needs. 
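+ *
+ * In outline (a reading of the code below, not additional behaviour):
+ * when the platform reports broken parity handling
+ * (pdev->broken_parity_status), parity checking is disabled in
+ * PCI_COMMAND; otherwise SERR# and parity reporting are (re-)enabled.
+ * The latched error bits in PCI_STATUS are then cleared by writing
+ * them back, as they are write-one-to-clear.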
+ */ + if (pdev->broken_parity_status) + pci_cmd &= ~PCI_COMMAND_PARITY; + else + pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY; + + pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); + + pci_write_config_word(pdev, PCI_STATUS, + pci_status & (PCI_STATUS_DETECTED_PARITY | + PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT | + PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT)); + + /* The infamous DAC f*ckup only happens at boot time */ + if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) { + void __iomem *ioaddr = tp->mmio_addr; + + netif_info(tp, intr, dev, "disabling PCI DAC\n"); + tp->cp_cmd &= ~PCIDAC; + RTL_W16(CPlusCmd, tp->cp_cmd); + dev->features &= ~NETIF_F_HIGHDMA; + } + + rtl8169_hw_reset(tp); + + rtl8169_schedule_work(dev, rtl8169_reinit_task); +} + +static void rtl8169_tx_interrupt(struct net_device *dev, + struct rtl8169_private *tp, + void __iomem *ioaddr) +{ + unsigned int dirty_tx, tx_left; + + dirty_tx = tp->dirty_tx; + smp_rmb(); + tx_left = tp->cur_tx - dirty_tx; + + while (tx_left > 0) { + unsigned int entry = dirty_tx % NUM_TX_DESC; + struct ring_info *tx_skb = tp->tx_skb + entry; + u32 status; + + rmb(); + status = le32_to_cpu(tp->TxDescArray[entry].opts1); + if (status & DescOwn) + break; + + rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb, + tp->TxDescArray + entry); + if (status & LastFrag) { + dev->stats.tx_packets++; + dev->stats.tx_bytes += tx_skb->skb->len; + dev_kfree_skb(tx_skb->skb); + tx_skb->skb = NULL; + } + dirty_tx++; + tx_left--; + } + + if (tp->dirty_tx != dirty_tx) { + tp->dirty_tx = dirty_tx; + smp_wmb(); + if (netif_queue_stopped(dev) && + (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) { + netif_wake_queue(dev); + } + /* + * 8168 hack: TxPoll requests are lost when the Tx packets are + * too close. Let's kick an extra TxPoll request when a burst + * of start_xmit activity is detected (if it is not detected, + * it is slow enough). -- FR + */ + smp_rmb(); + if (tp->cur_tx != dirty_tx) + RTL_W8(TxPoll, NPQ); + } +} + +static inline int rtl8169_fragmented_frame(u32 status) +{ + return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag); +} + +static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1) +{ + u32 status = opts1 & RxProtoMask; + + if (((status == RxProtoTCP) && !(opts1 & TCPFail)) || + ((status == RxProtoUDP) && !(opts1 & UDPFail))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); +} + +static struct sk_buff *rtl8169_try_rx_copy(void *data, + struct rtl8169_private *tp, + int pkt_size, + dma_addr_t addr) +{ + struct sk_buff *skb; + struct device *d = &tp->pci_dev->dev; + + data = rtl8169_align(data); + dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE); + prefetch(data); + skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size); + if (skb) + memcpy(skb->data, data, pkt_size); + dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE); + + return skb; +} + +static int rtl8169_rx_interrupt(struct net_device *dev, + struct rtl8169_private *tp, + void __iomem *ioaddr, u32 budget) +{ + unsigned int cur_rx, rx_left; + unsigned int count; + + cur_rx = tp->cur_rx; + rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx; + rx_left = min(rx_left, budget); + + for (; rx_left > 0; rx_left--, cur_rx++) { + unsigned int entry = cur_rx % NUM_RX_DESC; + struct RxDesc *desc = tp->RxDescArray + entry; + u32 status; + + rmb(); + status = le32_to_cpu(desc->opts1) & tp->opts1_mask; + + if (status & DescOwn) + break; + if (unlikely(status & RxRES)) { + netif_info(tp, rx_err, dev, "Rx ERROR. 
status = %08x\n",
+ status);
+ dev->stats.rx_errors++;
+ if (status & (RxRWT | RxRUNT))
+ dev->stats.rx_length_errors++;
+ if (status & RxCRC)
+ dev->stats.rx_crc_errors++;
+ if (status & RxFOVF) {
+ rtl8169_schedule_work(dev, rtl8169_reset_task);
+ dev->stats.rx_fifo_errors++;
+ }
+ rtl8169_mark_to_asic(desc, rx_buf_sz);
+ } else {
+ struct sk_buff *skb;
+ dma_addr_t addr = le64_to_cpu(desc->addr);
+ int pkt_size = (status & 0x00003fff) - 4;
+
+ /*
+ * The driver does not support incoming fragmented
+ * frames. They are seen as a symptom of over-mtu
+ * sized frames.
+ */
+ if (unlikely(rtl8169_fragmented_frame(status))) {
+ dev->stats.rx_dropped++;
+ dev->stats.rx_length_errors++;
+ rtl8169_mark_to_asic(desc, rx_buf_sz);
+ continue;
+ }
+
+ skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
+ tp, pkt_size, addr);
+ rtl8169_mark_to_asic(desc, rx_buf_sz);
+ if (!skb) {
+ dev->stats.rx_dropped++;
+ continue;
+ }
+
+ rtl8169_rx_csum(skb, status);
+ skb_put(skb, pkt_size);
+ skb->protocol = eth_type_trans(skb, dev);
+
+ rtl8169_rx_vlan_tag(desc, skb);
+
+ napi_gro_receive(&tp->napi, skb);
+
+ dev->stats.rx_bytes += pkt_size;
+ dev->stats.rx_packets++;
+ }
+
+ /* Workaround for AMD platform. */
+ if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
+ (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
+ desc->opts2 = 0;
+ cur_rx++;
+ }
+ }
+
+ count = cur_rx - tp->cur_rx;
+ tp->cur_rx = cur_rx;
+
+ tp->dirty_rx += count;
+
+ return count;
+}
+
+static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
+{
+ struct net_device *dev = dev_instance;
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ int handled = 0;
+ int status;
+
+ /* loop handling interrupts until we have no new ones or
+ * we hit an invalid/hotplug case.
+ */
+ status = RTL_R16(IntrStatus);
+ while (status && status != 0xffff) {
+ handled = 1;
+
+ /* Handle all of the error cases first. These will reset
+ * the chip, so just exit the loop.
+ */
+ if (unlikely(!netif_running(dev))) {
+ rtl8169_hw_reset(tp);
+ break;
+ }
+
+ if (unlikely(status & RxFIFOOver)) {
+ switch (tp->mac_version) {
+ /* Workaround for rx fifo overflow */
+ case RTL_GIGA_MAC_VER_11:
+ case RTL_GIGA_MAC_VER_22:
+ case RTL_GIGA_MAC_VER_26:
+ netif_stop_queue(dev);
+ rtl8169_tx_timeout(dev);
+ goto done;
+ /* Testers needed. */
+ case RTL_GIGA_MAC_VER_17:
+ case RTL_GIGA_MAC_VER_19:
+ case RTL_GIGA_MAC_VER_20:
+ case RTL_GIGA_MAC_VER_21:
+ case RTL_GIGA_MAC_VER_23:
+ case RTL_GIGA_MAC_VER_24:
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
+ /* Experimental science. Pktgen proof. */
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_25:
+ if (status == RxFIFOOver)
+ goto done;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (unlikely(status & SYSErr)) {
+ rtl8169_pcierr_interrupt(dev);
+ break;
+ }
+
+ if (status & LinkChg)
+ __rtl8169_check_link_status(dev, tp, ioaddr, true);
+
+ /* We need to see the latest version of tp->intr_mask to
+ * avoid ignoring an MSI interrupt and having to wait for
+ * another event which may never come.
+ */
+ smp_rmb();
+ if (status & tp->intr_mask & tp->napi_event) {
+ RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
+ tp->intr_mask = ~tp->napi_event;
+
+ if (likely(napi_schedule_prep(&tp->napi)))
+ __napi_schedule(&tp->napi);
+ else
+ netif_info(tp, intr, dev,
+ "interrupt %04x in poll\n", status);
+ }
+
+ /* We only get a new MSI interrupt when all active irq
+ * sources on the chip have been acknowledged. So, ack
+ * everything we've seen and check if new sources have become
+ * active to avoid blocking all interrupts from the chip.
+ */
+ RTL_W16(IntrStatus,
+ (status & RxFIFOOver) ? (status | RxOverflow) : status);
+ status = RTL_R16(IntrStatus);
+ }
+done:
+ return IRQ_RETVAL(handled);
+}
+
+static int rtl8169_poll(struct napi_struct *napi, int budget)
+{
+ struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
+ struct net_device *dev = tp->dev;
+ void __iomem *ioaddr = tp->mmio_addr;
+ int work_done;
+
+ work_done = rtl8169_rx_interrupt(dev, tp, ioaddr, (u32) budget);
+ rtl8169_tx_interrupt(dev, tp, ioaddr);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+
+ /* We need to force the visibility of tp->intr_mask
+ * for other CPUs, as we can lose an MSI interrupt
+ * and potentially wait for a retransmit timeout if we don't.
+ * The posted write to IntrMask is safe, as it will
+ * eventually make it to the chip and we won't lose anything
+ * until it does.
+ */
+ tp->intr_mask = 0xffff;
+ wmb();
+ RTL_W16(IntrMask, tp->intr_event);
+ }
+
+ return work_done;
+}
+
+static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (tp->mac_version > RTL_GIGA_MAC_VER_06)
+ return;
+
+ dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
+ RTL_W32(RxMissed, 0);
+}
+
+static void rtl8169_down(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ del_timer_sync(&tp->timer);
+
+ netif_stop_queue(dev);
+
+ napi_disable(&tp->napi);
+
+ spin_lock_irq(&tp->lock);
+
+ rtl8169_hw_reset(tp);
+ /*
+ * At this point device interrupts can not be enabled in any function,
+ * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task,
+ * rtl8169_reinit_task) and napi is disabled (rtl8169_poll).
+ */
+ rtl8169_rx_missed(dev, ioaddr);
+
+ spin_unlock_irq(&tp->lock);
+
+ synchronize_irq(dev->irq);
+
+ /* Give a racing hard_start_xmit a few cycles to complete. */
+ synchronize_sched(); /* FIXME: should this be synchronize_irq()? */
+
+ rtl8169_tx_clear(tp);
+
+ rtl8169_rx_clear(tp);
+
+ rtl_pll_power_down(tp);
+}
+
+static int rtl8169_close(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct pci_dev *pdev = tp->pci_dev;
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ /* Update counters before going down */
+ rtl8169_update_counters(dev);
+
+ rtl8169_down(dev);
+
+ free_irq(dev->irq, dev);
+
+ dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
+ tp->RxPhyAddr);
+ dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
+ tp->TxPhyAddr);
+ tp->TxDescArray = NULL;
+ tp->RxDescArray = NULL;
+
+ pm_runtime_put_sync(&pdev->dev);
+
+ return 0;
+}
+
+static void rtl_set_rx_mode(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ unsigned long flags;
+ u32 mc_filter[2]; /* Multicast hash filter */
+ int rx_mode;
+ u32 tmp = 0;
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Unconditionally log net taps. */
+ netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
+ rx_mode =
+ AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
+ AcceptAllPhys;
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter perfectly -- accept all multicasts.
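+ * In the hash-filter branch below, each address is hashed with
+ * ether_crc() and the top six bits of the 32-bit CRC pick one of the
+ * 64 bits in mc_filter[]; for example, 01:00:5e:00:00:01 maps to bit
+ * 31 (the same mapping smsc911x_hash() documents later in this patch).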
*/ + rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; + mc_filter[1] = mc_filter[0] = 0xffffffff; + } else { + struct netdev_hw_addr *ha; + + rx_mode = AcceptBroadcast | AcceptMyPhys; + mc_filter[1] = mc_filter[0] = 0; + netdev_for_each_mc_addr(ha, dev) { + int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + rx_mode |= AcceptMulticast; + } + } + + spin_lock_irqsave(&tp->lock, flags); + + tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode; + + if (tp->mac_version > RTL_GIGA_MAC_VER_06) { + u32 data = mc_filter[0]; + + mc_filter[0] = swab32(mc_filter[1]); + mc_filter[1] = swab32(data); + } + + RTL_W32(MAR0 + 4, mc_filter[1]); + RTL_W32(MAR0 + 0, mc_filter[0]); + + RTL_W32(RxConfig, tmp); + + spin_unlock_irqrestore(&tp->lock, flags); +} + +/** + * rtl8169_get_stats - Get rtl8169 read/write statistics + * @dev: The Ethernet Device to get statistics for + * + * Get TX/RX statistics for rtl8169 + */ +static struct net_device_stats *rtl8169_get_stats(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + unsigned long flags; + + if (netif_running(dev)) { + spin_lock_irqsave(&tp->lock, flags); + rtl8169_rx_missed(dev, ioaddr); + spin_unlock_irqrestore(&tp->lock, flags); + } + + return &dev->stats; +} + +static void rtl8169_net_suspend(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (!netif_running(dev)) + return; + + rtl_pll_power_down(tp); + + netif_device_detach(dev); + netif_stop_queue(dev); +} + +#ifdef CONFIG_PM + +static int rtl8169_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + + rtl8169_net_suspend(dev); + + return 0; +} + +static void __rtl8169_resume(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + netif_device_attach(dev); + + rtl_pll_power_up(tp); + + rtl8169_schedule_work(dev, rtl8169_reset_task); +} + +static int rtl8169_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_init_phy(dev, tp); + + if (netif_running(dev)) + __rtl8169_resume(dev); + + return 0; +} + +static int rtl8169_runtime_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + if (!tp->TxDescArray) + return 0; + + spin_lock_irq(&tp->lock); + tp->saved_wolopts = __rtl8169_get_wol(tp); + __rtl8169_set_wol(tp, WAKE_ANY); + spin_unlock_irq(&tp->lock); + + rtl8169_net_suspend(dev); + + return 0; +} + +static int rtl8169_runtime_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + if (!tp->TxDescArray) + return 0; + + spin_lock_irq(&tp->lock); + __rtl8169_set_wol(tp, tp->saved_wolopts); + tp->saved_wolopts = 0; + spin_unlock_irq(&tp->lock); + + rtl8169_init_phy(dev, tp); + + __rtl8169_resume(dev); + + return 0; +} + +static int rtl8169_runtime_idle(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + return tp->TxDescArray ? 
-EBUSY : 0; +} + +static const struct dev_pm_ops rtl8169_pm_ops = { + .suspend = rtl8169_suspend, + .resume = rtl8169_resume, + .freeze = rtl8169_suspend, + .thaw = rtl8169_resume, + .poweroff = rtl8169_suspend, + .restore = rtl8169_resume, + .runtime_suspend = rtl8169_runtime_suspend, + .runtime_resume = rtl8169_runtime_resume, + .runtime_idle = rtl8169_runtime_idle, +}; + +#define RTL8169_PM_OPS (&rtl8169_pm_ops) + +#else /* !CONFIG_PM */ + +#define RTL8169_PM_OPS NULL + +#endif /* !CONFIG_PM */ + ++static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp) ++{ ++ void __iomem *ioaddr = tp->mmio_addr; ++ ++ /* WoL fails with 8168b when the receiver is disabled. */ ++ switch (tp->mac_version) { ++ case RTL_GIGA_MAC_VER_11: ++ case RTL_GIGA_MAC_VER_12: ++ case RTL_GIGA_MAC_VER_17: ++ pci_clear_master(tp->pci_dev); ++ ++ RTL_W8(ChipCmd, CmdRxEnb); ++ /* PCI commit */ ++ RTL_R8(ChipCmd); ++ break; ++ default: ++ break; ++ } ++} ++ +static void rtl_shutdown(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr; + + rtl8169_net_suspend(dev); + + /* Restore original MAC address */ + rtl_rar_set(tp, dev->perm_addr); + + spin_lock_irq(&tp->lock); + + rtl8169_hw_reset(tp); + + spin_unlock_irq(&tp->lock); + + if (system_state == SYSTEM_POWER_OFF) { - /* WoL fails with 8168b when the receiver is disabled. */ - if ((tp->mac_version == RTL_GIGA_MAC_VER_11 || - tp->mac_version == RTL_GIGA_MAC_VER_12 || - tp->mac_version == RTL_GIGA_MAC_VER_17) && - (tp->features & RTL_FEATURE_WOL)) { - pci_clear_master(pdev); - - RTL_W8(ChipCmd, CmdRxEnb); - /* PCI commit */ - RTL_R8(ChipCmd); ++ if (__rtl8169_get_wol(tp) & WAKE_ANY) { ++ rtl_wol_suspend_quirk(tp); ++ rtl_wol_shutdown_quirk(tp); + } + + pci_wake_from_d3(pdev, true); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +static struct pci_driver rtl8169_pci_driver = { + .name = MODULENAME, + .id_table = rtl8169_pci_tbl, + .probe = rtl8169_init_one, + .remove = __devexit_p(rtl8169_remove_one), + .shutdown = rtl_shutdown, + .driver.pm = RTL8169_PM_OPS, +}; + +static int __init rtl8169_init_module(void) +{ + return pci_register_driver(&rtl8169_pci_driver); +} + +static void __exit rtl8169_cleanup_module(void) +{ + pci_unregister_driver(&rtl8169_pci_driver); +} + +module_init(rtl8169_init_module); +module_exit(rtl8169_cleanup_module); diff --cc drivers/net/ethernet/smsc/smsc911x.c index a3aa4c0e87f3,000000000000..d2be42aafbef mode 100644,000000..100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@@ -1,2406 -1,0 +1,2408 @@@ +/*************************************************************************** + * + * Copyright (C) 2004-2008 SMSC + * Copyright (C) 2005-2008 ARM + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + *************************************************************************** + * Rewritten, heavily based on smsc911x simple driver by SMSC. + * Partly uses io macros from smc91x.c by Nicolas Pitre + * + * Supported devices: + * LAN9115, LAN9116, LAN9117, LAN9118 + * LAN9215, LAN9216, LAN9217, LAN9218 + * LAN9210, LAN9211 + * LAN9220, LAN9221 ++ * LAN89218 + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "smsc911x.h" + +#define SMSC_CHIPNAME "smsc911x" +#define SMSC_MDIONAME "smsc911x-mdio" +#define SMSC_DRV_VERSION "2008-10-21" + +MODULE_LICENSE("GPL"); +MODULE_VERSION(SMSC_DRV_VERSION); +MODULE_ALIAS("platform:smsc911x"); + +#if USE_DEBUG > 0 +static int debug = 16; +#else +static int debug = 3; +#endif + +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +struct smsc911x_data; + +struct smsc911x_ops { + u32 (*reg_read)(struct smsc911x_data *pdata, u32 reg); + void (*reg_write)(struct smsc911x_data *pdata, u32 reg, u32 val); + void (*rx_readfifo)(struct smsc911x_data *pdata, + unsigned int *buf, unsigned int wordcount); + void (*tx_writefifo)(struct smsc911x_data *pdata, + unsigned int *buf, unsigned int wordcount); +}; + +struct smsc911x_data { + void __iomem *ioaddr; + + unsigned int idrev; + + /* used to decide which workarounds apply */ + unsigned int generation; + + /* device configuration (copied from platform_data during probe) */ + struct smsc911x_platform_config config; + + /* This needs to be acquired before calling any of below: + * smsc911x_mac_read(), smsc911x_mac_write() + */ + spinlock_t mac_lock; + + /* spinlock to ensure register accesses are serialised */ + spinlock_t dev_lock; + + struct phy_device *phy_dev; + struct mii_bus *mii_bus; + int phy_irq[PHY_MAX_ADDR]; + unsigned int using_extphy; + int last_duplex; + int last_carrier; + + u32 msg_enable; + unsigned int gpio_setting; + unsigned int gpio_orig_setting; + struct net_device *dev; + struct napi_struct napi; + + unsigned int software_irq_signal; + +#ifdef USE_PHY_WORK_AROUND +#define MIN_PACKET_SIZE (64) + char loopback_tx_pkt[MIN_PACKET_SIZE]; + char loopback_rx_pkt[MIN_PACKET_SIZE]; + unsigned int resetcount; +#endif + + /* Members for Multicast filter workaround */ + unsigned int multicast_update_pending; + unsigned int set_bits_mask; + unsigned int clear_bits_mask; + unsigned int hashhi; + unsigned int hashlo; + + /* register access functions */ + const struct smsc911x_ops *ops; +}; + +/* Easy access to information */ +#define __smsc_shift(pdata, reg) ((reg) << ((pdata)->config.shift)) + +static inline u32 __smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg) +{ + if (pdata->config.flags & SMSC911X_USE_32BIT) + return readl(pdata->ioaddr + reg); + + if (pdata->config.flags & SMSC911X_USE_16BIT) + return ((readw(pdata->ioaddr + reg) & 0xFFFF) | + ((readw(pdata->ioaddr + reg + 2) & 0xFFFF) << 16)); + + BUG(); + return 0; +} + +static inline u32 +__smsc911x_reg_read_shift(struct smsc911x_data *pdata, u32 reg) +{ + if (pdata->config.flags & SMSC911X_USE_32BIT) + return readl(pdata->ioaddr + __smsc_shift(pdata, reg)); + + if (pdata->config.flags & SMSC911X_USE_16BIT) + return (readw(pdata->ioaddr + + __smsc_shift(pdata, reg)) & 0xFFFF) | + ((readw(pdata->ioaddr + + __smsc_shift(pdata, reg + 
2)) & 0xFFFF) << 16); + + BUG(); + return 0; +} + +static inline u32 smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg) +{ + u32 data; + unsigned long flags; + + spin_lock_irqsave(&pdata->dev_lock, flags); + data = pdata->ops->reg_read(pdata, reg); + spin_unlock_irqrestore(&pdata->dev_lock, flags); + + return data; +} + +static inline void __smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg, + u32 val) +{ + if (pdata->config.flags & SMSC911X_USE_32BIT) { + writel(val, pdata->ioaddr + reg); + return; + } + + if (pdata->config.flags & SMSC911X_USE_16BIT) { + writew(val & 0xFFFF, pdata->ioaddr + reg); + writew((val >> 16) & 0xFFFF, pdata->ioaddr + reg + 2); + return; + } + + BUG(); +} + +static inline void +__smsc911x_reg_write_shift(struct smsc911x_data *pdata, u32 reg, u32 val) +{ + if (pdata->config.flags & SMSC911X_USE_32BIT) { + writel(val, pdata->ioaddr + __smsc_shift(pdata, reg)); + return; + } + + if (pdata->config.flags & SMSC911X_USE_16BIT) { + writew(val & 0xFFFF, + pdata->ioaddr + __smsc_shift(pdata, reg)); + writew((val >> 16) & 0xFFFF, + pdata->ioaddr + __smsc_shift(pdata, reg + 2)); + return; + } + + BUG(); +} + +static inline void smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg, + u32 val) +{ + unsigned long flags; + + spin_lock_irqsave(&pdata->dev_lock, flags); + pdata->ops->reg_write(pdata, reg, val); + spin_unlock_irqrestore(&pdata->dev_lock, flags); +} + +/* Writes a packet to the TX_DATA_FIFO */ +static inline void +smsc911x_tx_writefifo(struct smsc911x_data *pdata, unsigned int *buf, + unsigned int wordcount) +{ + unsigned long flags; + + spin_lock_irqsave(&pdata->dev_lock, flags); + + if (pdata->config.flags & SMSC911X_SWAP_FIFO) { + while (wordcount--) + __smsc911x_reg_write(pdata, TX_DATA_FIFO, + swab32(*buf++)); + goto out; + } + + if (pdata->config.flags & SMSC911X_USE_32BIT) { + writesl(pdata->ioaddr + TX_DATA_FIFO, buf, wordcount); + goto out; + } + + if (pdata->config.flags & SMSC911X_USE_16BIT) { + while (wordcount--) + __smsc911x_reg_write(pdata, TX_DATA_FIFO, *buf++); + goto out; + } + + BUG(); +out: + spin_unlock_irqrestore(&pdata->dev_lock, flags); +} + +/* Writes a packet to the TX_DATA_FIFO - shifted version */ +static inline void +smsc911x_tx_writefifo_shift(struct smsc911x_data *pdata, unsigned int *buf, + unsigned int wordcount) +{ + unsigned long flags; + + spin_lock_irqsave(&pdata->dev_lock, flags); + + if (pdata->config.flags & SMSC911X_SWAP_FIFO) { + while (wordcount--) + __smsc911x_reg_write_shift(pdata, TX_DATA_FIFO, + swab32(*buf++)); + goto out; + } + + if (pdata->config.flags & SMSC911X_USE_32BIT) { + writesl(pdata->ioaddr + __smsc_shift(pdata, + TX_DATA_FIFO), buf, wordcount); + goto out; + } + + if (pdata->config.flags & SMSC911X_USE_16BIT) { + while (wordcount--) + __smsc911x_reg_write_shift(pdata, + TX_DATA_FIFO, *buf++); + goto out; + } + + BUG(); +out: + spin_unlock_irqrestore(&pdata->dev_lock, flags); +} + +/* Reads a packet out of the RX_DATA_FIFO */ +static inline void +smsc911x_rx_readfifo(struct smsc911x_data *pdata, unsigned int *buf, + unsigned int wordcount) +{ + unsigned long flags; + + spin_lock_irqsave(&pdata->dev_lock, flags); + + if (pdata->config.flags & SMSC911X_SWAP_FIFO) { + while (wordcount--) + *buf++ = swab32(__smsc911x_reg_read(pdata, + RX_DATA_FIFO)); + goto out; + } + + if (pdata->config.flags & SMSC911X_USE_32BIT) { + readsl(pdata->ioaddr + RX_DATA_FIFO, buf, wordcount); + goto out; + } + + if (pdata->config.flags & SMSC911X_USE_16BIT) { + while (wordcount--) + *buf++ = __smsc911x_reg_read(pdata, 
RX_DATA_FIFO); + goto out; + } + + BUG(); +out: + spin_unlock_irqrestore(&pdata->dev_lock, flags); +} + +/* Reads a packet out of the RX_DATA_FIFO - shifted version */ +static inline void +smsc911x_rx_readfifo_shift(struct smsc911x_data *pdata, unsigned int *buf, + unsigned int wordcount) +{ + unsigned long flags; + + spin_lock_irqsave(&pdata->dev_lock, flags); + + if (pdata->config.flags & SMSC911X_SWAP_FIFO) { + while (wordcount--) + *buf++ = swab32(__smsc911x_reg_read_shift(pdata, + RX_DATA_FIFO)); + goto out; + } + + if (pdata->config.flags & SMSC911X_USE_32BIT) { + readsl(pdata->ioaddr + __smsc_shift(pdata, + RX_DATA_FIFO), buf, wordcount); + goto out; + } + + if (pdata->config.flags & SMSC911X_USE_16BIT) { + while (wordcount--) + *buf++ = __smsc911x_reg_read_shift(pdata, + RX_DATA_FIFO); + goto out; + } + + BUG(); +out: + spin_unlock_irqrestore(&pdata->dev_lock, flags); +} + +/* waits for MAC not busy, with timeout. Only called by smsc911x_mac_read + * and smsc911x_mac_write, so assumes mac_lock is held */ +static int smsc911x_mac_complete(struct smsc911x_data *pdata) +{ + int i; + u32 val; + + SMSC_ASSERT_MAC_LOCK(pdata); + + for (i = 0; i < 40; i++) { + val = smsc911x_reg_read(pdata, MAC_CSR_CMD); + if (!(val & MAC_CSR_CMD_CSR_BUSY_)) + return 0; + } + SMSC_WARN(pdata, hw, "Timed out waiting for MAC not BUSY. " + "MAC_CSR_CMD: 0x%08X", val); + return -EIO; +} + +/* Fetches a MAC register value. Assumes mac_lock is acquired */ +static u32 smsc911x_mac_read(struct smsc911x_data *pdata, unsigned int offset) +{ + unsigned int temp; + + SMSC_ASSERT_MAC_LOCK(pdata); + + temp = smsc911x_reg_read(pdata, MAC_CSR_CMD); + if (unlikely(temp & MAC_CSR_CMD_CSR_BUSY_)) { + SMSC_WARN(pdata, hw, "MAC busy at entry"); + return 0xFFFFFFFF; + } + + /* Send the MAC cmd */ + smsc911x_reg_write(pdata, MAC_CSR_CMD, ((offset & 0xFF) | + MAC_CSR_CMD_CSR_BUSY_ | MAC_CSR_CMD_R_NOT_W_)); + + /* Workaround for hardware read-after-write restriction */ + temp = smsc911x_reg_read(pdata, BYTE_TEST); + + /* Wait for the read to complete */ + if (likely(smsc911x_mac_complete(pdata) == 0)) + return smsc911x_reg_read(pdata, MAC_CSR_DATA); + + SMSC_WARN(pdata, hw, "MAC busy after read"); + return 0xFFFFFFFF; +} + +/* Set a mac register, mac_lock must be acquired before calling */ +static void smsc911x_mac_write(struct smsc911x_data *pdata, + unsigned int offset, u32 val) +{ + unsigned int temp; + + SMSC_ASSERT_MAC_LOCK(pdata); + + temp = smsc911x_reg_read(pdata, MAC_CSR_CMD); + if (unlikely(temp & MAC_CSR_CMD_CSR_BUSY_)) { + SMSC_WARN(pdata, hw, + "smsc911x_mac_write failed, MAC busy at entry"); + return; + } + + /* Send data to write */ + smsc911x_reg_write(pdata, MAC_CSR_DATA, val); + + /* Write the actual data */ + smsc911x_reg_write(pdata, MAC_CSR_CMD, ((offset & 0xFF) | + MAC_CSR_CMD_CSR_BUSY_)); + + /* Workaround for hardware read-after-write restriction */ + temp = smsc911x_reg_read(pdata, BYTE_TEST); + + /* Wait for the write to complete */ + if (likely(smsc911x_mac_complete(pdata) == 0)) + return; + + SMSC_WARN(pdata, hw, "smsc911x_mac_write failed, MAC busy after write"); +} + +/* Get a phy register */ +static int smsc911x_mii_read(struct mii_bus *bus, int phyaddr, int regidx) +{ + struct smsc911x_data *pdata = (struct smsc911x_data *)bus->priv; + unsigned long flags; + unsigned int addr; + int i, reg; + + spin_lock_irqsave(&pdata->mac_lock, flags); + + /* Confirm MII not busy */ + if (unlikely(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) { + SMSC_WARN(pdata, hw, "MII is busy in 
smsc911x_mii_read???"); + reg = -EIO; + goto out; + } + + /* Set the address, index & direction (read from PHY) */ + addr = ((phyaddr & 0x1F) << 11) | ((regidx & 0x1F) << 6); + smsc911x_mac_write(pdata, MII_ACC, addr); + + /* Wait for read to complete w/ timeout */ + for (i = 0; i < 100; i++) + if (!(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) { + reg = smsc911x_mac_read(pdata, MII_DATA); + goto out; + } + + SMSC_WARN(pdata, hw, "Timed out waiting for MII read to finish"); + reg = -EIO; + +out: + spin_unlock_irqrestore(&pdata->mac_lock, flags); + return reg; +} + +/* Set a phy register */ +static int smsc911x_mii_write(struct mii_bus *bus, int phyaddr, int regidx, + u16 val) +{ + struct smsc911x_data *pdata = (struct smsc911x_data *)bus->priv; + unsigned long flags; + unsigned int addr; + int i, reg; + + spin_lock_irqsave(&pdata->mac_lock, flags); + + /* Confirm MII not busy */ + if (unlikely(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) { + SMSC_WARN(pdata, hw, "MII is busy in smsc911x_mii_write???"); + reg = -EIO; + goto out; + } + + /* Put the data to write in the MAC */ + smsc911x_mac_write(pdata, MII_DATA, val); + + /* Set the address, index & direction (write to PHY) */ + addr = ((phyaddr & 0x1F) << 11) | ((regidx & 0x1F) << 6) | + MII_ACC_MII_WRITE_; + smsc911x_mac_write(pdata, MII_ACC, addr); + + /* Wait for write to complete w/ timeout */ + for (i = 0; i < 100; i++) + if (!(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) { + reg = 0; + goto out; + } + + SMSC_WARN(pdata, hw, "Timed out waiting for MII write to finish"); + reg = -EIO; + +out: + spin_unlock_irqrestore(&pdata->mac_lock, flags); + return reg; +} + +/* Switch to external phy. Assumes tx and rx are stopped. */ +static void smsc911x_phy_enable_external(struct smsc911x_data *pdata) +{ + unsigned int hwcfg = smsc911x_reg_read(pdata, HW_CFG); + + /* Disable phy clocks to the MAC */ + hwcfg &= (~HW_CFG_PHY_CLK_SEL_); + hwcfg |= HW_CFG_PHY_CLK_SEL_CLK_DIS_; + smsc911x_reg_write(pdata, HW_CFG, hwcfg); + udelay(10); /* Enough time for clocks to stop */ + + /* Switch to external phy */ + hwcfg |= HW_CFG_EXT_PHY_EN_; + smsc911x_reg_write(pdata, HW_CFG, hwcfg); + + /* Enable phy clocks to the MAC */ + hwcfg &= (~HW_CFG_PHY_CLK_SEL_); + hwcfg |= HW_CFG_PHY_CLK_SEL_EXT_PHY_; + smsc911x_reg_write(pdata, HW_CFG, hwcfg); + udelay(10); /* Enough time for clocks to restart */ + + hwcfg |= HW_CFG_SMI_SEL_; + smsc911x_reg_write(pdata, HW_CFG, hwcfg); +} + +/* Autodetects and enables external phy if present on supported chips. + * autodetection can be overridden by specifying SMSC911X_FORCE_INTERNAL_PHY + * or SMSC911X_FORCE_EXTERNAL_PHY in the platform_data flags. 
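+ * Detection order, as implemented below: a forced-internal flag wins,
+ * then a forced-external flag, and only then the HW_CFG_EXT_PHY_DET_
+ * strap read back from HW_CFG.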
*/ +static void smsc911x_phy_initialise_external(struct smsc911x_data *pdata) +{ + unsigned int hwcfg = smsc911x_reg_read(pdata, HW_CFG); + + if (pdata->config.flags & SMSC911X_FORCE_INTERNAL_PHY) { + SMSC_TRACE(pdata, hw, "Forcing internal PHY"); + pdata->using_extphy = 0; + } else if (pdata->config.flags & SMSC911X_FORCE_EXTERNAL_PHY) { + SMSC_TRACE(pdata, hw, "Forcing external PHY"); + smsc911x_phy_enable_external(pdata); + pdata->using_extphy = 1; + } else if (hwcfg & HW_CFG_EXT_PHY_DET_) { + SMSC_TRACE(pdata, hw, + "HW_CFG EXT_PHY_DET set, using external PHY"); + smsc911x_phy_enable_external(pdata); + pdata->using_extphy = 1; + } else { + SMSC_TRACE(pdata, hw, + "HW_CFG EXT_PHY_DET clear, using internal PHY"); + pdata->using_extphy = 0; + } +} + +/* Fetches a tx status out of the status fifo */ +static unsigned int smsc911x_tx_get_txstatus(struct smsc911x_data *pdata) +{ + unsigned int result = + smsc911x_reg_read(pdata, TX_FIFO_INF) & TX_FIFO_INF_TSUSED_; + + if (result != 0) + result = smsc911x_reg_read(pdata, TX_STATUS_FIFO); + + return result; +} + +/* Fetches the next rx status */ +static unsigned int smsc911x_rx_get_rxstatus(struct smsc911x_data *pdata) +{ + unsigned int result = + smsc911x_reg_read(pdata, RX_FIFO_INF) & RX_FIFO_INF_RXSUSED_; + + if (result != 0) + result = smsc911x_reg_read(pdata, RX_STATUS_FIFO); + + return result; +} + +#ifdef USE_PHY_WORK_AROUND +static int smsc911x_phy_check_loopbackpkt(struct smsc911x_data *pdata) +{ + unsigned int tries; + u32 wrsz; + u32 rdsz; + ulong bufp; + + for (tries = 0; tries < 10; tries++) { + unsigned int txcmd_a; + unsigned int txcmd_b; + unsigned int status; + unsigned int pktlength; + unsigned int i; + + /* Zero-out rx packet memory */ + memset(pdata->loopback_rx_pkt, 0, MIN_PACKET_SIZE); + + /* Write tx packet to 118 */ + txcmd_a = (u32)((ulong)pdata->loopback_tx_pkt & 0x03) << 16; + txcmd_a |= TX_CMD_A_FIRST_SEG_ | TX_CMD_A_LAST_SEG_; + txcmd_a |= MIN_PACKET_SIZE; + + txcmd_b = MIN_PACKET_SIZE << 16 | MIN_PACKET_SIZE; + + smsc911x_reg_write(pdata, TX_DATA_FIFO, txcmd_a); + smsc911x_reg_write(pdata, TX_DATA_FIFO, txcmd_b); + + bufp = (ulong)pdata->loopback_tx_pkt & (~0x3); + wrsz = MIN_PACKET_SIZE + 3; + wrsz += (u32)((ulong)pdata->loopback_tx_pkt & 0x3); + wrsz >>= 2; + + pdata->ops->tx_writefifo(pdata, (unsigned int *)bufp, wrsz); + + /* Wait till transmit is done */ + i = 60; + do { + udelay(5); + status = smsc911x_tx_get_txstatus(pdata); + } while ((i--) && (!status)); + + if (!status) { + SMSC_WARN(pdata, hw, + "Failed to transmit during loopback test"); + continue; + } + if (status & TX_STS_ES_) { + SMSC_WARN(pdata, hw, + "Transmit encountered errors during loopback test"); + continue; + } + + /* Wait till receive is done */ + i = 60; + do { + udelay(5); + status = smsc911x_rx_get_rxstatus(pdata); + } while ((i--) && (!status)); + + if (!status) { + SMSC_WARN(pdata, hw, + "Failed to receive during loopback test"); + continue; + } + if (status & RX_STS_ES_) { + SMSC_WARN(pdata, hw, + "Receive encountered errors during loopback test"); + continue; + } + + pktlength = ((status & 0x3FFF0000UL) >> 16); + bufp = (ulong)pdata->loopback_rx_pkt; + rdsz = pktlength + 3; + rdsz += (u32)((ulong)pdata->loopback_rx_pkt & 0x3); + rdsz >>= 2; + + pdata->ops->rx_readfifo(pdata, (unsigned int *)bufp, rdsz); + + if (pktlength != (MIN_PACKET_SIZE + 4)) { + SMSC_WARN(pdata, hw, "Unexpected packet size " + "during loop back test, size=%d, will retry", + pktlength); + } else { + unsigned int j; + int mismatch = 0; + for (j = 0; j < 
MIN_PACKET_SIZE; j++) { + if (pdata->loopback_tx_pkt[j] + != pdata->loopback_rx_pkt[j]) { + mismatch = 1; + break; + } + } + if (!mismatch) { + SMSC_TRACE(pdata, hw, "Successfully verified " + "loopback packet"); + return 0; + } else { + SMSC_WARN(pdata, hw, "Data mismatch " + "during loop back test, will retry"); + } + } + } + + return -EIO; +} + +static int smsc911x_phy_reset(struct smsc911x_data *pdata) +{ + struct phy_device *phy_dev = pdata->phy_dev; + unsigned int temp; + unsigned int i = 100000; + + BUG_ON(!phy_dev); + BUG_ON(!phy_dev->bus); + + SMSC_TRACE(pdata, hw, "Performing PHY BCR Reset"); + smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, BMCR_RESET); + do { + msleep(1); + temp = smsc911x_mii_read(phy_dev->bus, phy_dev->addr, + MII_BMCR); + } while ((i--) && (temp & BMCR_RESET)); + + if (temp & BMCR_RESET) { + SMSC_WARN(pdata, hw, "PHY reset failed to complete"); + return -EIO; + } + /* Extra delay required because the phy may not be completed with + * its reset when BMCR_RESET is cleared. Specs say 256 uS is + * enough delay but using 1ms here to be safe */ + msleep(1); + + return 0; +} + +static int smsc911x_phy_loopbacktest(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + struct phy_device *phy_dev = pdata->phy_dev; + int result = -EIO; + unsigned int i, val; + unsigned long flags; + + /* Initialise tx packet using broadcast destination address */ + memset(pdata->loopback_tx_pkt, 0xff, ETH_ALEN); + + /* Use incrementing source address */ + for (i = 6; i < 12; i++) + pdata->loopback_tx_pkt[i] = (char)i; + + /* Set length type field */ + pdata->loopback_tx_pkt[12] = 0x00; + pdata->loopback_tx_pkt[13] = 0x00; + + for (i = 14; i < MIN_PACKET_SIZE; i++) + pdata->loopback_tx_pkt[i] = (char)i; + + val = smsc911x_reg_read(pdata, HW_CFG); + val &= HW_CFG_TX_FIF_SZ_; + val |= HW_CFG_SF_; + smsc911x_reg_write(pdata, HW_CFG, val); + + smsc911x_reg_write(pdata, TX_CFG, TX_CFG_TX_ON_); + smsc911x_reg_write(pdata, RX_CFG, + (u32)((ulong)pdata->loopback_rx_pkt & 0x03) << 8); + + for (i = 0; i < 10; i++) { + /* Set PHY to 10/FD, no ANEG, and loopback mode */ + smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, + BMCR_LOOPBACK | BMCR_FULLDPLX); + + /* Enable MAC tx/rx, FD */ + spin_lock_irqsave(&pdata->mac_lock, flags); + smsc911x_mac_write(pdata, MAC_CR, MAC_CR_FDPX_ + | MAC_CR_TXEN_ | MAC_CR_RXEN_); + spin_unlock_irqrestore(&pdata->mac_lock, flags); + + if (smsc911x_phy_check_loopbackpkt(pdata) == 0) { + result = 0; + break; + } + pdata->resetcount++; + + /* Disable MAC rx */ + spin_lock_irqsave(&pdata->mac_lock, flags); + smsc911x_mac_write(pdata, MAC_CR, 0); + spin_unlock_irqrestore(&pdata->mac_lock, flags); + + smsc911x_phy_reset(pdata); + } + + /* Disable MAC */ + spin_lock_irqsave(&pdata->mac_lock, flags); + smsc911x_mac_write(pdata, MAC_CR, 0); + spin_unlock_irqrestore(&pdata->mac_lock, flags); + + /* Cancel PHY loopback mode */ + smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, 0); + + smsc911x_reg_write(pdata, TX_CFG, 0); + smsc911x_reg_write(pdata, RX_CFG, 0); + + return result; +} +#endif /* USE_PHY_WORK_AROUND */ + +static void smsc911x_phy_update_flowcontrol(struct smsc911x_data *pdata) +{ + struct phy_device *phy_dev = pdata->phy_dev; + u32 afc = smsc911x_reg_read(pdata, AFC_CFG); + u32 flow; + unsigned long flags; + + if (phy_dev->duplex == DUPLEX_FULL) { + u16 lcladv = phy_read(phy_dev, MII_ADVERTISE); + u16 rmtadv = phy_read(phy_dev, MII_LPA); + u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv); + + if (cap & 
FLOW_CTRL_RX) + flow = 0xFFFF0002; + else + flow = 0; + + if (cap & FLOW_CTRL_TX) + afc |= 0xF; + else + afc &= ~0xF; + + SMSC_TRACE(pdata, hw, "rx pause %s, tx pause %s", + (cap & FLOW_CTRL_RX ? "enabled" : "disabled"), + (cap & FLOW_CTRL_TX ? "enabled" : "disabled")); + } else { + SMSC_TRACE(pdata, hw, "half duplex"); + flow = 0; + afc |= 0xF; + } + + spin_lock_irqsave(&pdata->mac_lock, flags); + smsc911x_mac_write(pdata, FLOW, flow); + spin_unlock_irqrestore(&pdata->mac_lock, flags); + + smsc911x_reg_write(pdata, AFC_CFG, afc); +} + +/* Update link mode if anything has changed. Called periodically when the + * PHY is in polling mode, even if nothing has changed. */ +static void smsc911x_phy_adjust_link(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + struct phy_device *phy_dev = pdata->phy_dev; + unsigned long flags; + int carrier; + + if (phy_dev->duplex != pdata->last_duplex) { + unsigned int mac_cr; + SMSC_TRACE(pdata, hw, "duplex state has changed"); + + spin_lock_irqsave(&pdata->mac_lock, flags); + mac_cr = smsc911x_mac_read(pdata, MAC_CR); + if (phy_dev->duplex) { + SMSC_TRACE(pdata, hw, + "configuring for full duplex mode"); + mac_cr |= MAC_CR_FDPX_; + } else { + SMSC_TRACE(pdata, hw, + "configuring for half duplex mode"); + mac_cr &= ~MAC_CR_FDPX_; + } + smsc911x_mac_write(pdata, MAC_CR, mac_cr); + spin_unlock_irqrestore(&pdata->mac_lock, flags); + + smsc911x_phy_update_flowcontrol(pdata); + pdata->last_duplex = phy_dev->duplex; + } + + carrier = netif_carrier_ok(dev); + if (carrier != pdata->last_carrier) { + SMSC_TRACE(pdata, hw, "carrier state has changed"); + if (carrier) { + SMSC_TRACE(pdata, hw, "configuring for carrier OK"); + if ((pdata->gpio_orig_setting & GPIO_CFG_LED1_EN_) && + (!pdata->using_extphy)) { + /* Restore original GPIO configuration */ + pdata->gpio_setting = pdata->gpio_orig_setting; + smsc911x_reg_write(pdata, GPIO_CFG, + pdata->gpio_setting); + } + } else { + SMSC_TRACE(pdata, hw, "configuring for no carrier"); + /* Check global setting that LED1 + * usage is 10/100 indicator */ + pdata->gpio_setting = smsc911x_reg_read(pdata, + GPIO_CFG); + if ((pdata->gpio_setting & GPIO_CFG_LED1_EN_) && + (!pdata->using_extphy)) { + /* Force 10/100 LED off, after saving + * original GPIO configuration */ + pdata->gpio_orig_setting = pdata->gpio_setting; + + pdata->gpio_setting &= ~GPIO_CFG_LED1_EN_; + pdata->gpio_setting |= (GPIO_CFG_GPIOBUF0_ + | GPIO_CFG_GPIODIR0_ + | GPIO_CFG_GPIOD0_); + smsc911x_reg_write(pdata, GPIO_CFG, + pdata->gpio_setting); + } + } + pdata->last_carrier = carrier; + } +} + +static int smsc911x_mii_probe(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + struct phy_device *phydev = NULL; + int ret; + + /* find the first phy */ + phydev = phy_find_first(pdata->mii_bus); + if (!phydev) { + netdev_err(dev, "no PHY found\n"); + return -ENODEV; + } + + SMSC_TRACE(pdata, probe, "PHY: addr %d, phy_id 0x%08X", + phydev->addr, phydev->phy_id); + + ret = phy_connect_direct(dev, phydev, + &smsc911x_phy_adjust_link, 0, + pdata->config.phy_interface); + + if (ret) { + netdev_err(dev, "Could not attach to PHY\n"); + return ret; + } + + netdev_info(dev, + "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", + phydev->drv->name, dev_name(&phydev->dev), phydev->irq); + + /* mask with MAC supported features */ + phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + phydev->advertising = phydev->supported; + + pdata->phy_dev = phydev; + pdata->last_duplex = -1; + 
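/* A real duplex or carrier state is never negative, so priming these
+ * with -1 guarantees the first smsc911x_phy_adjust_link() call treats
+ * both as changed. */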
pdata->last_carrier = -1;
+
+#ifdef USE_PHY_WORK_AROUND
+ if (smsc911x_phy_loopbacktest(dev) < 0) {
+ SMSC_WARN(pdata, hw, "Failed Loop Back Test");
+ return -ENODEV;
+ }
+ SMSC_TRACE(pdata, hw, "Passed Loop Back Test");
+#endif /* USE_PHY_WORK_AROUND */
+
+ SMSC_TRACE(pdata, hw, "phy initialised successfully");
+ return 0;
+}
+
+static int __devinit smsc911x_mii_init(struct platform_device *pdev,
+ struct net_device *dev)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ int err = -ENXIO, i;
+
+ pdata->mii_bus = mdiobus_alloc();
+ if (!pdata->mii_bus) {
+ err = -ENOMEM;
+ goto err_out_1;
+ }
+
+ pdata->mii_bus->name = SMSC_MDIONAME;
+ snprintf(pdata->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
+ pdata->mii_bus->priv = pdata;
+ pdata->mii_bus->read = smsc911x_mii_read;
+ pdata->mii_bus->write = smsc911x_mii_write;
+ pdata->mii_bus->irq = pdata->phy_irq;
+ for (i = 0; i < PHY_MAX_ADDR; ++i)
+ pdata->mii_bus->irq[i] = PHY_POLL;
+
+ pdata->mii_bus->parent = &pdev->dev;
+
+ switch (pdata->idrev & 0xFFFF0000) {
+ case 0x01170000:
+ case 0x01150000:
+ case 0x117A0000:
+ case 0x115A0000:
+ /* External PHY supported, try to autodetect */
+ smsc911x_phy_initialise_external(pdata);
+ break;
+ default:
+ SMSC_TRACE(pdata, hw, "External PHY is not supported, "
+ "using internal PHY");
+ pdata->using_extphy = 0;
+ break;
+ }
+
+ if (!pdata->using_extphy) {
+ /* Mask all PHYs except ID 1 (internal) */
+ pdata->mii_bus->phy_mask = ~(1 << 1);
+ }
+
+ if (mdiobus_register(pdata->mii_bus)) {
+ SMSC_WARN(pdata, probe, "Error registering mii bus");
+ goto err_out_free_bus_2;
+ }
+
+ if (smsc911x_mii_probe(dev) < 0) {
+ SMSC_WARN(pdata, probe, "Error probing mii bus");
+ goto err_out_unregister_bus_3;
+ }
+
+ return 0;
+
+err_out_unregister_bus_3:
+ mdiobus_unregister(pdata->mii_bus);
+err_out_free_bus_2:
+ mdiobus_free(pdata->mii_bus);
+err_out_1:
+ return err;
+}
+
+/* Gets the number of tx statuses in the fifo */
+static unsigned int smsc911x_tx_get_txstatcount(struct smsc911x_data *pdata)
+{
+ return (smsc911x_reg_read(pdata, TX_FIFO_INF)
+ & TX_FIFO_INF_TSUSED_) >> 16;
+}
+
+/* Reads tx statuses and increments counters where necessary */
+static void smsc911x_tx_update_txcounters(struct net_device *dev)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ unsigned int tx_stat;
+
+ while ((tx_stat = smsc911x_tx_get_txstatus(pdata)) != 0) {
+ if (unlikely(tx_stat & 0x80000000)) {
+ /* In this driver the packet tag is used as the packet
+ * length. Since a packet length can never reach the
+ * size of 0x8000, this bit is reserved. It is worth
+ * noting that the "reserved bit" in the warning below
+ * does not reference a hardware defined reserved bit
+ * but rather a driver defined one.
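+ * (The tag is written by smsc911x_hard_start_xmit() further below as
+ * the upper 16 bits of TX_CMD_B, which is also why the non-error
+ * branch accounts tx_bytes from tx_stat >> 16.)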
+ */ + SMSC_WARN(pdata, hw, "Packet tag reserved bit is high"); + } else { + if (unlikely(tx_stat & TX_STS_ES_)) { + dev->stats.tx_errors++; + } else { + dev->stats.tx_packets++; + dev->stats.tx_bytes += (tx_stat >> 16); + } + if (unlikely(tx_stat & TX_STS_EXCESS_COL_)) { + dev->stats.collisions += 16; + dev->stats.tx_aborted_errors += 1; + } else { + dev->stats.collisions += + ((tx_stat >> 3) & 0xF); + } + if (unlikely(tx_stat & TX_STS_LOST_CARRIER_)) + dev->stats.tx_carrier_errors += 1; + if (unlikely(tx_stat & TX_STS_LATE_COL_)) { + dev->stats.collisions++; + dev->stats.tx_aborted_errors++; + } + } + } +} + +/* Increments the Rx error counters */ +static void +smsc911x_rx_counterrors(struct net_device *dev, unsigned int rxstat) +{ + int crc_err = 0; + + if (unlikely(rxstat & RX_STS_ES_)) { + dev->stats.rx_errors++; + if (unlikely(rxstat & RX_STS_CRC_ERR_)) { + dev->stats.rx_crc_errors++; + crc_err = 1; + } + } + if (likely(!crc_err)) { + if (unlikely((rxstat & RX_STS_FRAME_TYPE_) && + (rxstat & RX_STS_LENGTH_ERR_))) + dev->stats.rx_length_errors++; + if (rxstat & RX_STS_MCAST_) + dev->stats.multicast++; + } +} + +/* Quickly dumps bad packets */ +static void +smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktbytes) +{ + unsigned int pktwords = (pktbytes + NET_IP_ALIGN + 3) >> 2; + + if (likely(pktwords >= 4)) { + unsigned int timeout = 500; + unsigned int val; + smsc911x_reg_write(pdata, RX_DP_CTRL, RX_DP_CTRL_RX_FFWD_); + do { + udelay(1); + val = smsc911x_reg_read(pdata, RX_DP_CTRL); + } while ((val & RX_DP_CTRL_RX_FFWD_) && --timeout); + + if (unlikely(timeout == 0)) + SMSC_WARN(pdata, hw, "Timed out waiting for " + "RX FFWD to finish, RX_DP_CTRL: 0x%08X", val); + } else { + unsigned int temp; + while (pktwords--) + temp = smsc911x_reg_read(pdata, RX_DATA_FIFO); + } +} + +/* NAPI poll function */ +static int smsc911x_poll(struct napi_struct *napi, int budget) +{ + struct smsc911x_data *pdata = + container_of(napi, struct smsc911x_data, napi); + struct net_device *dev = pdata->dev; + int npackets = 0; + + while (npackets < budget) { + unsigned int pktlength; + unsigned int pktwords; + struct sk_buff *skb; + unsigned int rxstat = smsc911x_rx_get_rxstatus(pdata); + + if (!rxstat) { + unsigned int temp; + /* We processed all packets available. Tell NAPI it can + * stop polling then re-enable rx interrupts */ + smsc911x_reg_write(pdata, INT_STS, INT_STS_RSFL_); + napi_complete(napi); + temp = smsc911x_reg_read(pdata, INT_EN); + temp |= INT_EN_RSFL_EN_; + smsc911x_reg_write(pdata, INT_EN, temp); + break; + } + + /* Count packet for NAPI scheduling, even if it has an error. 
+ * Error packets still require cycles to discard */ + npackets++; + + pktlength = ((rxstat & 0x3FFF0000) >> 16); + pktwords = (pktlength + NET_IP_ALIGN + 3) >> 2; + smsc911x_rx_counterrors(dev, rxstat); + + if (unlikely(rxstat & RX_STS_ES_)) { + SMSC_WARN(pdata, rx_err, + "Discarding packet with error bit set"); + /* Packet has an error, discard it and continue with + * the next */ + smsc911x_rx_fastforward(pdata, pktwords); + dev->stats.rx_dropped++; + continue; + } + + skb = netdev_alloc_skb(dev, pktlength + NET_IP_ALIGN); + if (unlikely(!skb)) { + SMSC_WARN(pdata, rx_err, + "Unable to allocate skb for rx packet"); + /* Drop the packet and stop this polling iteration */ + smsc911x_rx_fastforward(pdata, pktwords); + dev->stats.rx_dropped++; + break; + } + + skb->data = skb->head; + skb_reset_tail_pointer(skb); + + /* Align IP on 16B boundary */ + skb_reserve(skb, NET_IP_ALIGN); + skb_put(skb, pktlength - 4); + pdata->ops->rx_readfifo(pdata, + (unsigned int *)skb->head, pktwords); + skb->protocol = eth_type_trans(skb, dev); + skb_checksum_none_assert(skb); + netif_receive_skb(skb); + + /* Update counters */ + dev->stats.rx_packets++; + dev->stats.rx_bytes += (pktlength - 4); + } + + /* Return total received packets */ + return npackets; +} + +/* Returns hash bit number for given MAC address + * Example: + * 01 00 5E 00 00 01 -> returns bit number 31 */ +static unsigned int smsc911x_hash(char addr[ETH_ALEN]) +{ + return (ether_crc(ETH_ALEN, addr) >> 26) & 0x3f; +} + +static void smsc911x_rx_multicast_update(struct smsc911x_data *pdata) +{ + /* Performs the multicast & mac_cr update. This is called when + * safe on the current hardware, and with the mac_lock held */ + unsigned int mac_cr; + + SMSC_ASSERT_MAC_LOCK(pdata); + + mac_cr = smsc911x_mac_read(pdata, MAC_CR); + mac_cr |= pdata->set_bits_mask; + mac_cr &= ~(pdata->clear_bits_mask); + smsc911x_mac_write(pdata, MAC_CR, mac_cr); + smsc911x_mac_write(pdata, HASHH, pdata->hashhi); + smsc911x_mac_write(pdata, HASHL, pdata->hashlo); + SMSC_TRACE(pdata, hw, "maccr 0x%08X, HASHH 0x%08X, HASHL 0x%08X", + mac_cr, pdata->hashhi, pdata->hashlo); +} + +static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata) +{ + unsigned int mac_cr; + + /* This function is only called for older LAN911x devices + * (revA or revB), where MAC_CR, HASHH and HASHL should not + * be modified during Rx - newer devices immediately update the + * registers. 
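+ *
+ * The sequence, as wired up in this driver: smsc911x_set_multicast_list()
+ * clears MAC_CR_RXEN_ and sets multicast_update_pending, and once the
+ * hardware raises the Rx-stopped interrupt this function applies the
+ * new filter values and re-enables Rx.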
+ * + * This is called from interrupt context */ + + spin_lock(&pdata->mac_lock); + + /* Check Rx has stopped */ + if (smsc911x_mac_read(pdata, MAC_CR) & MAC_CR_RXEN_) + SMSC_WARN(pdata, drv, "Rx not stopped"); + + /* Perform the update - safe to do now Rx has stopped */ + smsc911x_rx_multicast_update(pdata); + + /* Re-enable Rx */ + mac_cr = smsc911x_mac_read(pdata, MAC_CR); + mac_cr |= MAC_CR_RXEN_; + smsc911x_mac_write(pdata, MAC_CR, mac_cr); + + pdata->multicast_update_pending = 0; + + spin_unlock(&pdata->mac_lock); +} + +static int smsc911x_soft_reset(struct smsc911x_data *pdata) +{ + unsigned int timeout; + unsigned int temp; + + /* Reset the LAN911x */ + smsc911x_reg_write(pdata, HW_CFG, HW_CFG_SRST_); + timeout = 10; + do { + udelay(10); + temp = smsc911x_reg_read(pdata, HW_CFG); + } while ((--timeout) && (temp & HW_CFG_SRST_)); + + if (unlikely(temp & HW_CFG_SRST_)) { + SMSC_WARN(pdata, drv, "Failed to complete reset"); + return -EIO; + } + return 0; +} + +/* Sets the device MAC address to dev_addr, called with mac_lock held */ +static void +smsc911x_set_hw_mac_address(struct smsc911x_data *pdata, u8 dev_addr[6]) +{ + u32 mac_high16 = (dev_addr[5] << 8) | dev_addr[4]; + u32 mac_low32 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | + (dev_addr[1] << 8) | dev_addr[0]; + + SMSC_ASSERT_MAC_LOCK(pdata); + + smsc911x_mac_write(pdata, ADDRH, mac_high16); + smsc911x_mac_write(pdata, ADDRL, mac_low32); +} + +static int smsc911x_open(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + unsigned int timeout; + unsigned int temp; + unsigned int intcfg; + + /* if the phy is not yet registered, retry later*/ + if (!pdata->phy_dev) { + SMSC_WARN(pdata, hw, "phy_dev is NULL"); + return -EAGAIN; + } + + if (!is_valid_ether_addr(dev->dev_addr)) { + SMSC_WARN(pdata, hw, "dev_addr is not a valid MAC address"); + return -EADDRNOTAVAIL; + } + + /* Reset the LAN911x */ + if (smsc911x_soft_reset(pdata)) { + SMSC_WARN(pdata, hw, "soft reset failed"); + return -EIO; + } + + smsc911x_reg_write(pdata, HW_CFG, 0x00050000); + smsc911x_reg_write(pdata, AFC_CFG, 0x006E3740); + + /* Increase the legal frame size of VLAN tagged frames to 1522 bytes */ + spin_lock_irq(&pdata->mac_lock); + smsc911x_mac_write(pdata, VLAN1, ETH_P_8021Q); + spin_unlock_irq(&pdata->mac_lock); + + /* Make sure EEPROM has finished loading before setting GPIO_CFG */ + timeout = 50; + while ((smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) && + --timeout) { + udelay(10); + } + + if (unlikely(timeout == 0)) + SMSC_WARN(pdata, ifup, + "Timed out waiting for EEPROM busy bit to clear"); + + smsc911x_reg_write(pdata, GPIO_CFG, 0x70070000); + + /* The soft reset above cleared the device's MAC address, + * restore it from local copy (set in probe) */ + spin_lock_irq(&pdata->mac_lock); + smsc911x_set_hw_mac_address(pdata, dev->dev_addr); + spin_unlock_irq(&pdata->mac_lock); + + /* Initialise irqs, but leave all sources disabled */ + smsc911x_reg_write(pdata, INT_EN, 0); + smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF); + + /* Set interrupt deassertion to 100uS */ + intcfg = ((10 << 24) | INT_CFG_IRQ_EN_); + + if (pdata->config.irq_polarity) { + SMSC_TRACE(pdata, ifup, "irq polarity: active high"); + intcfg |= INT_CFG_IRQ_POL_; + } else { + SMSC_TRACE(pdata, ifup, "irq polarity: active low"); + } + + if (pdata->config.irq_type) { + SMSC_TRACE(pdata, ifup, "irq type: push-pull"); + intcfg |= INT_CFG_IRQ_TYPE_; + } else { + SMSC_TRACE(pdata, ifup, "irq type: open drain"); + } + + smsc911x_reg_write(pdata, INT_CFG, 
intcfg); + + SMSC_TRACE(pdata, ifup, "Testing irq handler using IRQ %d", dev->irq); + pdata->software_irq_signal = 0; + smp_wmb(); + + temp = smsc911x_reg_read(pdata, INT_EN); + temp |= INT_EN_SW_INT_EN_; + smsc911x_reg_write(pdata, INT_EN, temp); + + timeout = 1000; + while (timeout--) { + if (pdata->software_irq_signal) + break; + msleep(1); + } + + if (!pdata->software_irq_signal) { + netdev_warn(dev, "ISR failed signaling test (IRQ %d)\n", + dev->irq); + return -ENODEV; + } + SMSC_TRACE(pdata, ifup, "IRQ handler passed test using IRQ %d", + dev->irq); + + netdev_info(dev, "SMSC911x/921x identified at %#08lx, IRQ: %d\n", + (unsigned long)pdata->ioaddr, dev->irq); + + /* Reset the last known duplex and carrier */ + pdata->last_duplex = -1; + pdata->last_carrier = -1; + + /* Bring the PHY up */ + phy_start(pdata->phy_dev); + + temp = smsc911x_reg_read(pdata, HW_CFG); + /* Preserve TX FIFO size and external PHY configuration */ + temp &= (HW_CFG_TX_FIF_SZ_|0x00000FFF); + temp |= HW_CFG_SF_; + smsc911x_reg_write(pdata, HW_CFG, temp); + + temp = smsc911x_reg_read(pdata, FIFO_INT); + temp |= FIFO_INT_TX_AVAIL_LEVEL_; + temp &= ~(FIFO_INT_RX_STS_LEVEL_); + smsc911x_reg_write(pdata, FIFO_INT, temp); + + /* set RX Data offset to 2 bytes for alignment */ + smsc911x_reg_write(pdata, RX_CFG, (2 << 8)); + + /* enable NAPI polling before enabling RX interrupts */ + napi_enable(&pdata->napi); + + temp = smsc911x_reg_read(pdata, INT_EN); + temp |= (INT_EN_TDFA_EN_ | INT_EN_RSFL_EN_ | INT_EN_RXSTOP_INT_EN_); + smsc911x_reg_write(pdata, INT_EN, temp); + + spin_lock_irq(&pdata->mac_lock); + temp = smsc911x_mac_read(pdata, MAC_CR); + temp |= (MAC_CR_TXEN_ | MAC_CR_RXEN_ | MAC_CR_HBDIS_); + smsc911x_mac_write(pdata, MAC_CR, temp); + spin_unlock_irq(&pdata->mac_lock); + + smsc911x_reg_write(pdata, TX_CFG, TX_CFG_TX_ON_); + + netif_start_queue(dev); + return 0; +} + +/* Entry point for stopping the interface */ +static int smsc911x_stop(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + unsigned int temp; + + /* Disable all device interrupts */ + temp = smsc911x_reg_read(pdata, INT_CFG); + temp &= ~INT_CFG_IRQ_EN_; + smsc911x_reg_write(pdata, INT_CFG, temp); + + /* Stop Tx and Rx polling */ + netif_stop_queue(dev); + napi_disable(&pdata->napi); + + /* At this point all Rx and Tx activity is stopped */ + dev->stats.rx_dropped += smsc911x_reg_read(pdata, RX_DROP); + smsc911x_tx_update_txcounters(dev); + + /* Bring the PHY down */ + if (pdata->phy_dev) + phy_stop(pdata->phy_dev); + + SMSC_TRACE(pdata, ifdown, "Interface stopped"); + return 0; +} + +/* Entry point for transmitting a packet */ +static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + unsigned int freespace; + unsigned int tx_cmd_a; + unsigned int tx_cmd_b; + unsigned int temp; + u32 wrsz; + ulong bufp; + + freespace = smsc911x_reg_read(pdata, TX_FIFO_INF) & TX_FIFO_INF_TDFREE_; + + if (unlikely(freespace < TX_FIFO_LOW_THRESHOLD)) + SMSC_WARN(pdata, tx_err, + "Tx data fifo low, space available: %d", freespace); + + /* Word alignment adjustment */ + tx_cmd_a = (u32)((ulong)skb->data & 0x03) << 16; + tx_cmd_a |= TX_CMD_A_FIRST_SEG_ | TX_CMD_A_LAST_SEG_; + tx_cmd_a |= (unsigned int)skb->len; + + tx_cmd_b = ((unsigned int)skb->len) << 16; + tx_cmd_b |= (unsigned int)skb->len; + + smsc911x_reg_write(pdata, TX_DATA_FIFO, tx_cmd_a); + smsc911x_reg_write(pdata, TX_DATA_FIFO, tx_cmd_b); + + bufp = (ulong)skb->data & (~0x3); + wrsz = (u32)skb->len + 3; 
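+ /* The preceding line and the two below compute
+ * DIV_ROUND_UP(skb->len + start_offset, 4): the number of whole
+ * 32-bit FIFO words covering the payload, where start_offset is the
+ * 0-3 byte data alignment already encoded into TX_CMD_A above. */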
+ wrsz += (u32)((ulong)skb->data & 0x3); + wrsz >>= 2; + + pdata->ops->tx_writefifo(pdata, (unsigned int *)bufp, wrsz); + freespace -= (skb->len + 32); + skb_tx_timestamp(skb); + dev_kfree_skb(skb); + + if (unlikely(smsc911x_tx_get_txstatcount(pdata) >= 30)) + smsc911x_tx_update_txcounters(dev); + + if (freespace < TX_FIFO_LOW_THRESHOLD) { + netif_stop_queue(dev); + temp = smsc911x_reg_read(pdata, FIFO_INT); + temp &= 0x00FFFFFF; + temp |= 0x32000000; + smsc911x_reg_write(pdata, FIFO_INT, temp); + } + + return NETDEV_TX_OK; +} + +/* Entry point for getting status counters */ +static struct net_device_stats *smsc911x_get_stats(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + smsc911x_tx_update_txcounters(dev); + dev->stats.rx_dropped += smsc911x_reg_read(pdata, RX_DROP); + return &dev->stats; +} + +/* Entry point for setting addressing modes */ +static void smsc911x_set_multicast_list(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + unsigned long flags; + + if (dev->flags & IFF_PROMISC) { + /* Enabling promiscuous mode */ + pdata->set_bits_mask = MAC_CR_PRMS_; + pdata->clear_bits_mask = (MAC_CR_MCPAS_ | MAC_CR_HPFILT_); + pdata->hashhi = 0; + pdata->hashlo = 0; + } else if (dev->flags & IFF_ALLMULTI) { + /* Enabling all multicast mode */ + pdata->set_bits_mask = MAC_CR_MCPAS_; + pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_HPFILT_); + pdata->hashhi = 0; + pdata->hashlo = 0; + } else if (!netdev_mc_empty(dev)) { + /* Enabling specific multicast addresses */ + unsigned int hash_high = 0; + unsigned int hash_low = 0; + struct netdev_hw_addr *ha; + + pdata->set_bits_mask = MAC_CR_HPFILT_; + pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_MCPAS_); + + netdev_for_each_mc_addr(ha, dev) { + unsigned int bitnum = smsc911x_hash(ha->addr); + unsigned int mask = 0x01 << (bitnum & 0x1F); + + if (bitnum & 0x20) + hash_high |= mask; + else + hash_low |= mask; + } + + pdata->hashhi = hash_high; + pdata->hashlo = hash_low; + } else { + /* Enabling local MAC address only */ + pdata->set_bits_mask = 0; + pdata->clear_bits_mask = + (MAC_CR_PRMS_ | MAC_CR_MCPAS_ | MAC_CR_HPFILT_); + pdata->hashhi = 0; + pdata->hashlo = 0; + } + + spin_lock_irqsave(&pdata->mac_lock, flags); + + if (pdata->generation <= 1) { + /* Older hardware revision - cannot change these flags while + * receiving data */ + if (!pdata->multicast_update_pending) { + unsigned int temp; + SMSC_TRACE(pdata, hw, "scheduling mcast update"); + pdata->multicast_update_pending = 1; + + /* Request the hardware to stop, then perform the + * update when we get an RX_STOP interrupt */ + temp = smsc911x_mac_read(pdata, MAC_CR); + temp &= ~(MAC_CR_RXEN_); + smsc911x_mac_write(pdata, MAC_CR, temp); + } else { + /* There is another update pending, this should now + * use the newer values */ + } + } else { + /* Newer hardware revision - can write immediately */ + smsc911x_rx_multicast_update(pdata); + } + + spin_unlock_irqrestore(&pdata->mac_lock, flags); +} + +static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct smsc911x_data *pdata = netdev_priv(dev); + u32 intsts = smsc911x_reg_read(pdata, INT_STS); + u32 inten = smsc911x_reg_read(pdata, INT_EN); + int serviced = IRQ_NONE; + u32 temp; + + if (unlikely(intsts & inten & INT_STS_SW_INT_)) { + temp = smsc911x_reg_read(pdata, INT_EN); + temp &= (~INT_EN_SW_INT_EN_); + smsc911x_reg_write(pdata, INT_EN, temp); + smsc911x_reg_write(pdata, INT_STS, INT_STS_SW_INT_); + pdata->software_irq_signal = 1; 
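/* This store and the smp_wmb() below form the producer half of the IRQ
 * self-test handshake; the consumer half is the polling loop in
 * smsc911x_open() above, reduced here for reference:
 *
 *	timeout = 1000;
 *	while (timeout--) {
 *		if (pdata->software_irq_signal)
 *			break;
 *		msleep(1);
 *	}
 */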
+ smp_wmb(); + serviced = IRQ_HANDLED; + } + + if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) { + /* Called when there is a multicast update scheduled and + * it is now safe to complete the update */ + SMSC_TRACE(pdata, intr, "RX Stop interrupt"); + smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_); + if (pdata->multicast_update_pending) + smsc911x_rx_multicast_update_workaround(pdata); + serviced = IRQ_HANDLED; + } + + if (intsts & inten & INT_STS_TDFA_) { + temp = smsc911x_reg_read(pdata, FIFO_INT); + temp |= FIFO_INT_TX_AVAIL_LEVEL_; + smsc911x_reg_write(pdata, FIFO_INT, temp); + smsc911x_reg_write(pdata, INT_STS, INT_STS_TDFA_); + netif_wake_queue(dev); + serviced = IRQ_HANDLED; + } + + if (unlikely(intsts & inten & INT_STS_RXE_)) { + SMSC_TRACE(pdata, intr, "RX Error interrupt"); + smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_); + serviced = IRQ_HANDLED; + } + + if (likely(intsts & inten & INT_STS_RSFL_)) { + if (likely(napi_schedule_prep(&pdata->napi))) { + /* Disable Rx interrupts */ + temp = smsc911x_reg_read(pdata, INT_EN); + temp &= (~INT_EN_RSFL_EN_); + smsc911x_reg_write(pdata, INT_EN, temp); + /* Schedule a NAPI poll */ + __napi_schedule(&pdata->napi); + } else { + SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed"); + } + serviced = IRQ_HANDLED; + } + + return serviced; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void smsc911x_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + smsc911x_irqhandler(0, dev); + enable_irq(dev->irq); +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + +static int smsc911x_set_mac_address(struct net_device *dev, void *p) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + struct sockaddr *addr = p; + + /* On older hardware revisions we cannot change the mac address + * registers while receiving data. Newer devices can safely change + * this at any time. 
*/ + if (pdata->generation <= 1 && netif_running(dev)) + return -EBUSY; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + + spin_lock_irq(&pdata->mac_lock); + smsc911x_set_hw_mac_address(pdata, dev->dev_addr); + spin_unlock_irq(&pdata->mac_lock); + + netdev_info(dev, "MAC Address: %pM\n", dev->dev_addr); + + return 0; +} + +/* Standard ioctls for mii-tool */ +static int smsc911x_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + + if (!netif_running(dev) || !pdata->phy_dev) + return -EINVAL; + + return phy_mii_ioctl(pdata->phy_dev, ifr, cmd); +} + +static int +smsc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + + cmd->maxtxpkt = 1; + cmd->maxrxpkt = 1; + return phy_ethtool_gset(pdata->phy_dev, cmd); +} + +static int +smsc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + + return phy_ethtool_sset(pdata->phy_dev, cmd); +} + +static void smsc911x_ethtool_getdrvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, SMSC_CHIPNAME, sizeof(info->driver)); + strlcpy(info->version, SMSC_DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, dev_name(dev->dev.parent), + sizeof(info->bus_info)); +} + +static int smsc911x_ethtool_nwayreset(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + + return phy_start_aneg(pdata->phy_dev); +} + +static u32 smsc911x_ethtool_getmsglevel(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + return pdata->msg_enable; +} + +static void smsc911x_ethtool_setmsglevel(struct net_device *dev, u32 level) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + pdata->msg_enable = level; +} + +static int smsc911x_ethtool_getregslen(struct net_device *dev) +{ + return (((E2P_DATA - ID_REV) / 4 + 1) + (WUCSR - MAC_CR) + 1 + 32) * + sizeof(u32); +} + +static void +smsc911x_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs, + void *buf) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + struct phy_device *phy_dev = pdata->phy_dev; + unsigned long flags; + unsigned int i; + unsigned int j = 0; + u32 *data = buf; + + regs->version = pdata->idrev; + for (i = ID_REV; i <= E2P_DATA; i += (sizeof(u32))) + data[j++] = smsc911x_reg_read(pdata, i); + + for (i = MAC_CR; i <= WUCSR; i++) { + spin_lock_irqsave(&pdata->mac_lock, flags); + data[j++] = smsc911x_mac_read(pdata, i); + spin_unlock_irqrestore(&pdata->mac_lock, flags); + } + + for (i = 0; i <= 31; i++) + data[j++] = smsc911x_mii_read(phy_dev->bus, phy_dev->addr, i); +} + +static void smsc911x_eeprom_enable_access(struct smsc911x_data *pdata) +{ + unsigned int temp = smsc911x_reg_read(pdata, GPIO_CFG); + temp &= ~GPIO_CFG_EEPR_EN_; + smsc911x_reg_write(pdata, GPIO_CFG, temp); + msleep(1); +} + +static int smsc911x_eeprom_send_cmd(struct smsc911x_data *pdata, u32 op) +{ + int timeout = 100; + u32 e2cmd; + + SMSC_TRACE(pdata, drv, "op 0x%08x", op); + if (smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) { + SMSC_WARN(pdata, drv, "Busy at start"); + return -EBUSY; + } + + e2cmd = op | E2P_CMD_EPC_BUSY_; + smsc911x_reg_write(pdata, E2P_CMD, e2cmd); + + do { + msleep(1); + e2cmd = smsc911x_reg_read(pdata, E2P_CMD); + } while ((e2cmd & E2P_CMD_EPC_BUSY_) && (--timeout)); + + if (!timeout) { + SMSC_TRACE(pdata, drv, "TIMED OUT"); 
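/* Reaching this point means roughly 100 polls (one msleep(1) each) elapsed
 * with E2P_CMD_EPC_BUSY_ still set.  For reference, a full command
 * round-trip as driven by the helpers in this file looks like this
 * (sketch only; the read helper appears just below):
 *
 *	smsc911x_eeprom_enable_access(pdata);
 *	smsc911x_eeprom_send_cmd(pdata, E2P_CMD_EPC_CMD_READ_ | address);
 *	data = smsc911x_reg_read(pdata, E2P_DATA);
 */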
+ return -EAGAIN; + } + + if (e2cmd & E2P_CMD_EPC_TIMEOUT_) { + SMSC_TRACE(pdata, drv, "Error occurred during eeprom operation"); + return -EINVAL; + } + + return 0; +} + +static int smsc911x_eeprom_read_location(struct smsc911x_data *pdata, + u8 address, u8 *data) +{ + u32 op = E2P_CMD_EPC_CMD_READ_ | address; + int ret; + + SMSC_TRACE(pdata, drv, "address 0x%x", address); + ret = smsc911x_eeprom_send_cmd(pdata, op); + + if (!ret) + data[address] = smsc911x_reg_read(pdata, E2P_DATA); + + return ret; +} + +static int smsc911x_eeprom_write_location(struct smsc911x_data *pdata, + u8 address, u8 data) +{ + u32 op = E2P_CMD_EPC_CMD_ERASE_ | address; + u32 temp; + int ret; + + SMSC_TRACE(pdata, drv, "address 0x%x, data 0x%x", address, data); + ret = smsc911x_eeprom_send_cmd(pdata, op); + + if (!ret) { + op = E2P_CMD_EPC_CMD_WRITE_ | address; + smsc911x_reg_write(pdata, E2P_DATA, (u32)data); + + /* Workaround for hardware read-after-write restriction */ + temp = smsc911x_reg_read(pdata, BYTE_TEST); + + ret = smsc911x_eeprom_send_cmd(pdata, op); + } + + return ret; +} + +static int smsc911x_ethtool_get_eeprom_len(struct net_device *dev) +{ + return SMSC911X_EEPROM_SIZE; +} + +static int smsc911x_ethtool_get_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + u8 eeprom_data[SMSC911X_EEPROM_SIZE]; + int len; + int i; + + smsc911x_eeprom_enable_access(pdata); + + len = min(eeprom->len, SMSC911X_EEPROM_SIZE); + for (i = 0; i < len; i++) { + int ret = smsc911x_eeprom_read_location(pdata, i, eeprom_data); + if (ret < 0) { + eeprom->len = 0; + return ret; + } + } + + memcpy(data, &eeprom_data[eeprom->offset], len); + eeprom->len = len; + return 0; +} + +static int smsc911x_ethtool_set_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + int ret; + struct smsc911x_data *pdata = netdev_priv(dev); + + smsc911x_eeprom_enable_access(pdata); + smsc911x_eeprom_send_cmd(pdata, E2P_CMD_EPC_CMD_EWEN_); + ret = smsc911x_eeprom_write_location(pdata, eeprom->offset, *data); + smsc911x_eeprom_send_cmd(pdata, E2P_CMD_EPC_CMD_EWDS_); + + /* Single byte write, according to man page */ + eeprom->len = 1; + + return ret; +} + +static const struct ethtool_ops smsc911x_ethtool_ops = { + .get_settings = smsc911x_ethtool_getsettings, + .set_settings = smsc911x_ethtool_setsettings, + .get_link = ethtool_op_get_link, + .get_drvinfo = smsc911x_ethtool_getdrvinfo, + .nway_reset = smsc911x_ethtool_nwayreset, + .get_msglevel = smsc911x_ethtool_getmsglevel, + .set_msglevel = smsc911x_ethtool_setmsglevel, + .get_regs_len = smsc911x_ethtool_getregslen, + .get_regs = smsc911x_ethtool_getregs, + .get_eeprom_len = smsc911x_ethtool_get_eeprom_len, + .get_eeprom = smsc911x_ethtool_get_eeprom, + .set_eeprom = smsc911x_ethtool_set_eeprom, +}; + +static const struct net_device_ops smsc911x_netdev_ops = { + .ndo_open = smsc911x_open, + .ndo_stop = smsc911x_stop, + .ndo_start_xmit = smsc911x_hard_start_xmit, + .ndo_get_stats = smsc911x_get_stats, + .ndo_set_rx_mode = smsc911x_set_multicast_list, + .ndo_do_ioctl = smsc911x_do_ioctl, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = smsc911x_set_mac_address, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = smsc911x_poll_controller, +#endif +}; + +/* copies the current mac address from hardware to dev->dev_addr */ +static void __devinit smsc911x_read_mac_address(struct net_device *dev) +{ + struct smsc911x_data *pdata = 
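/* ADDRL carries bytes 0-3 of the MAC address and ADDRH bytes 4-5, least
 * significant byte first; e.g. (hypothetical values) ADDRL == 0x04030201
 * and ADDRH == 0x0605 unpack below to 01:02:03:04:05:06. */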
netdev_priv(dev); + u32 mac_high16 = smsc911x_mac_read(pdata, ADDRH); + u32 mac_low32 = smsc911x_mac_read(pdata, ADDRL); + + dev->dev_addr[0] = (u8)(mac_low32); + dev->dev_addr[1] = (u8)(mac_low32 >> 8); + dev->dev_addr[2] = (u8)(mac_low32 >> 16); + dev->dev_addr[3] = (u8)(mac_low32 >> 24); + dev->dev_addr[4] = (u8)(mac_high16); + dev->dev_addr[5] = (u8)(mac_high16 >> 8); +} + +/* Initializing private device structures, only called from probe */ +static int __devinit smsc911x_init(struct net_device *dev) +{ + struct smsc911x_data *pdata = netdev_priv(dev); + unsigned int byte_test; + + SMSC_TRACE(pdata, probe, "Driver Parameters:"); + SMSC_TRACE(pdata, probe, "LAN base: 0x%08lX", + (unsigned long)pdata->ioaddr); + SMSC_TRACE(pdata, probe, "IRQ: %d", dev->irq); + SMSC_TRACE(pdata, probe, "PHY will be autodetected."); + + spin_lock_init(&pdata->dev_lock); + spin_lock_init(&pdata->mac_lock); + + if (pdata->ioaddr == 0) { + SMSC_WARN(pdata, probe, "pdata->ioaddr: 0x00000000"); + return -ENODEV; + } + + /* Check byte ordering */ + byte_test = smsc911x_reg_read(pdata, BYTE_TEST); + SMSC_TRACE(pdata, probe, "BYTE_TEST: 0x%08X", byte_test); + if (byte_test == 0x43218765) { + SMSC_TRACE(pdata, probe, "BYTE_TEST looks swapped, " + "applying WORD_SWAP"); + smsc911x_reg_write(pdata, WORD_SWAP, 0xffffffff); + + /* 1 dummy read of BYTE_TEST is needed after a write to + * WORD_SWAP before its contents are valid */ + byte_test = smsc911x_reg_read(pdata, BYTE_TEST); + + byte_test = smsc911x_reg_read(pdata, BYTE_TEST); + } + + if (byte_test != 0x87654321) { + SMSC_WARN(pdata, drv, "BYTE_TEST: 0x%08X", byte_test); + if (((byte_test >> 16) & 0xFFFF) == (byte_test & 0xFFFF)) { + SMSC_WARN(pdata, probe, + "top 16 bits equal to bottom 16 bits"); + SMSC_TRACE(pdata, probe, + "This may mean the chip is set " + "for 32 bit while the bus is reading 16 bit"); + } + return -ENODEV; + } + + /* Default generation to zero (all workarounds apply) */ + pdata->generation = 0; + + pdata->idrev = smsc911x_reg_read(pdata, ID_REV); + switch (pdata->idrev & 0xFFFF0000) { + case 0x01180000: + case 0x01170000: + case 0x01160000: + case 0x01150000: ++ case 0x218A0000: + /* LAN911[5678] family */ + pdata->generation = pdata->idrev & 0x0000FFFF; + break; + + case 0x118A0000: + case 0x117A0000: + case 0x116A0000: + case 0x115A0000: + /* LAN921[5678] family */ + pdata->generation = 3; + break; + + case 0x92100000: + case 0x92110000: + case 0x92200000: + case 0x92210000: + /* LAN9210/LAN9211/LAN9220/LAN9221 */ + pdata->generation = 4; + break; + + default: + SMSC_WARN(pdata, probe, "LAN911x not identified, idrev: 0x%08X", + pdata->idrev); + return -ENODEV; + } + + SMSC_TRACE(pdata, probe, + "LAN911x identified, idrev: 0x%08X, generation: %d", + pdata->idrev, pdata->generation); + + if (pdata->generation == 0) + SMSC_WARN(pdata, probe, + "This driver is not intended for this chip revision"); + + /* workaround for platforms without an eeprom, where the mac address + * is stored elsewhere and set by the bootloader. 
This saves the
+ * mac address before resetting the device */
+ if (pdata->config.flags & SMSC911X_SAVE_MAC_ADDRESS) {
+ spin_lock_irq(&pdata->mac_lock);
+ smsc911x_read_mac_address(dev);
+ spin_unlock_irq(&pdata->mac_lock);
+ }
+
+ /* Reset the LAN911x */
+ if (smsc911x_soft_reset(pdata))
+ return -ENODEV;
+
+ /* Disable all interrupt sources until we bring the device up */
+ smsc911x_reg_write(pdata, INT_EN, 0);
+
+ ether_setup(dev);
+ dev->flags |= IFF_MULTICAST;
+ netif_napi_add(dev, &pdata->napi, smsc911x_poll, SMSC_NAPI_WEIGHT);
+ dev->netdev_ops = &smsc911x_netdev_ops;
+ dev->ethtool_ops = &smsc911x_ethtool_ops;
+
+ return 0;
+}
+
+static int __devexit smsc911x_drv_remove(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct smsc911x_data *pdata;
+ struct resource *res;
+
+ dev = platform_get_drvdata(pdev);
+ BUG_ON(!dev);
+ pdata = netdev_priv(dev);
+ BUG_ON(!pdata);
+ BUG_ON(!pdata->ioaddr);
+ BUG_ON(!pdata->phy_dev);
+
+ SMSC_TRACE(pdata, ifdown, "Stopping driver");
+
+ phy_disconnect(pdata->phy_dev);
+ pdata->phy_dev = NULL;
+ mdiobus_unregister(pdata->mii_bus);
+ mdiobus_free(pdata->mii_bus);
+
+ platform_set_drvdata(pdev, NULL);
+ unregister_netdev(dev);
+ free_irq(dev->irq, dev);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "smsc911x-memory");
+ if (!res)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ release_mem_region(res->start, resource_size(res));
+
+ iounmap(pdata->ioaddr);
+
+ free_netdev(dev);
+
+ return 0;
+}
+
+/* standard register access */
+static const struct smsc911x_ops standard_smsc911x_ops = {
+ .reg_read = __smsc911x_reg_read,
+ .reg_write = __smsc911x_reg_write,
+ .rx_readfifo = smsc911x_rx_readfifo,
+ .tx_writefifo = smsc911x_tx_writefifo,
+};
+
+/* shifted register access */
+static const struct smsc911x_ops shifted_smsc911x_ops = {
+ .reg_read = __smsc911x_reg_read_shift,
+ .reg_write = __smsc911x_reg_write_shift,
+ .rx_readfifo = smsc911x_rx_readfifo_shift,
+ .tx_writefifo = smsc911x_tx_writefifo_shift,
+};
+
+#ifdef CONFIG_OF
+static int __devinit smsc911x_probe_config_dt(
+ struct smsc911x_platform_config *config,
+ struct device_node *np)
+{
+ const char *mac;
+ u32 width = 0;
+
+ if (!np)
+ return -ENODEV;
+
+ config->phy_interface = of_get_phy_mode(np);
+
+ mac = of_get_mac_address(np);
+ if (mac)
+ memcpy(config->mac, mac, ETH_ALEN);
+
+ of_property_read_u32(np, "reg-shift", &config->shift);
+
+ of_property_read_u32(np, "reg-io-width", &width);
+ if (width == 4)
+ config->flags |= SMSC911X_USE_32BIT;
+ else
+ config->flags |= SMSC911X_USE_16BIT;
+
+ if (of_get_property(np, "smsc,irq-active-high", NULL))
+ config->irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH;
+
+ if (of_get_property(np, "smsc,irq-push-pull", NULL))
+ config->irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL;
+
+ if (of_get_property(np, "smsc,force-internal-phy", NULL))
+ config->flags |= SMSC911X_FORCE_INTERNAL_PHY;
+
+ if (of_get_property(np, "smsc,force-external-phy", NULL))
+ config->flags |= SMSC911X_FORCE_EXTERNAL_PHY;
+
+ if (of_get_property(np, "smsc,save-mac-address", NULL))
+ config->flags |= SMSC911X_SAVE_MAC_ADDRESS;
+
+ return 0;
+}
+#else
+static inline int smsc911x_probe_config_dt(
+ struct smsc911x_platform_config *config,
+ struct device_node *np)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_OF */
+
+static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct net_device *dev;
+ struct smsc911x_data *pdata;
+ struct smsc911x_platform_config *config =
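/* For reference, smsc911x_probe_config_dt() above consumes a device-tree
 * node shaped like the following (illustrative sketch: the unit address,
 * reg and interrupt values are invented; the compatible string is the one
 * registered in smsc911x_dt_ids below):
 *
 *	ethernet@18000000 {
 *		compatible = "smsc,lan9115";
 *		reg = <0x18000000 0x100>;
 *		interrupt-parent = <&gpio1>;
 *		interrupts = <10>;
 *		reg-io-width = <4>;
 *		smsc,irq-active-high;
 *		smsc,save-mac-address;
 *	};
 */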
pdev->dev.platform_data; + struct resource *res, *irq_res; + unsigned int intcfg = 0; + int res_size, irq_flags; + int retval; + + pr_info("Driver version %s\n", SMSC_DRV_VERSION); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "smsc911x-memory"); + if (!res) + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + pr_warn("Could not allocate resource\n"); + retval = -ENODEV; + goto out_0; + } + res_size = resource_size(res); + + irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!irq_res) { + pr_warn("Could not allocate irq resource\n"); + retval = -ENODEV; + goto out_0; + } + + if (!request_mem_region(res->start, res_size, SMSC_CHIPNAME)) { + retval = -EBUSY; + goto out_0; + } + + dev = alloc_etherdev(sizeof(struct smsc911x_data)); + if (!dev) { + pr_warn("Could not allocate device\n"); + retval = -ENOMEM; + goto out_release_io_1; + } + + SET_NETDEV_DEV(dev, &pdev->dev); + + pdata = netdev_priv(dev); + + dev->irq = irq_res->start; + irq_flags = irq_res->flags & IRQF_TRIGGER_MASK; + pdata->ioaddr = ioremap_nocache(res->start, res_size); + + pdata->dev = dev; + pdata->msg_enable = ((1 << debug) - 1); + + if (pdata->ioaddr == NULL) { + SMSC_WARN(pdata, probe, "Error smsc911x base address invalid"); + retval = -ENOMEM; + goto out_free_netdev_2; + } + + retval = smsc911x_probe_config_dt(&pdata->config, np); + if (retval && config) { + /* copy config parameters across to pdata */ + memcpy(&pdata->config, config, sizeof(pdata->config)); + retval = 0; + } + + if (retval) { + SMSC_WARN(pdata, probe, "Error smsc911x config not found"); + goto out_unmap_io_3; + } + + /* assume standard, non-shifted, access to HW registers */ + pdata->ops = &standard_smsc911x_ops; + /* apply the right access if shifting is needed */ + if (pdata->config.shift) + pdata->ops = &shifted_smsc911x_ops; + + retval = smsc911x_init(dev); + if (retval < 0) + goto out_unmap_io_3; + + /* configure irq polarity and type before connecting isr */ + if (pdata->config.irq_polarity == SMSC911X_IRQ_POLARITY_ACTIVE_HIGH) + intcfg |= INT_CFG_IRQ_POL_; + + if (pdata->config.irq_type == SMSC911X_IRQ_TYPE_PUSH_PULL) + intcfg |= INT_CFG_IRQ_TYPE_; + + smsc911x_reg_write(pdata, INT_CFG, intcfg); + + /* Ensure interrupts are globally disabled before connecting ISR */ + smsc911x_reg_write(pdata, INT_EN, 0); + smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF); + + retval = request_irq(dev->irq, smsc911x_irqhandler, + irq_flags | IRQF_SHARED, dev->name, dev); + if (retval) { + SMSC_WARN(pdata, probe, + "Unable to claim requested irq: %d", dev->irq); + goto out_unmap_io_3; + } + + platform_set_drvdata(pdev, dev); + + retval = register_netdev(dev); + if (retval) { + SMSC_WARN(pdata, probe, "Error %i registering device", retval); + goto out_unset_drvdata_4; + } else { + SMSC_TRACE(pdata, probe, + "Network interface: \"%s\"", dev->name); + } + + retval = smsc911x_mii_init(pdev, dev); + if (retval) { + SMSC_WARN(pdata, probe, "Error %i initialising mii", retval); + goto out_unregister_netdev_5; + } + + spin_lock_irq(&pdata->mac_lock); + + /* Check if mac address has been specified when bringing interface up */ + if (is_valid_ether_addr(dev->dev_addr)) { + smsc911x_set_hw_mac_address(pdata, dev->dev_addr); + SMSC_TRACE(pdata, probe, + "MAC Address is specified by configuration"); + } else if (is_valid_ether_addr(pdata->config.mac)) { + memcpy(dev->dev_addr, pdata->config.mac, 6); + SMSC_TRACE(pdata, probe, + "MAC Address specified by platform data"); + } else { + /* Try reading mac address from device. 
If EEPROM is present
+ * it will already have been set */
+ smsc_get_mac(dev);
+
+ if (is_valid_ether_addr(dev->dev_addr)) {
+ /* eeprom values are valid so use them */
+ SMSC_TRACE(pdata, probe,
+ "MAC Address is read from LAN911x EEPROM");
+ } else {
+ /* eeprom values are invalid, generate random MAC */
+ random_ether_addr(dev->dev_addr);
+ smsc911x_set_hw_mac_address(pdata, dev->dev_addr);
+ SMSC_TRACE(pdata, probe,
+ "MAC Address is set to random_ether_addr");
+ }
+ }
+
+ spin_unlock_irq(&pdata->mac_lock);
+
+ netdev_info(dev, "MAC Address: %pM\n", dev->dev_addr);
+
+ return 0;
+
+out_unregister_netdev_5:
+ unregister_netdev(dev);
+out_unset_drvdata_4:
+ platform_set_drvdata(pdev, NULL);
+ free_irq(dev->irq, dev);
+out_unmap_io_3:
+ iounmap(pdata->ioaddr);
+out_free_netdev_2:
+ free_netdev(dev);
+out_release_io_1:
+ release_mem_region(res->start, resource_size(res));
+out_0:
+ return retval;
+}
+
+#ifdef CONFIG_PM
+/* This implementation assumes the device remains powered on its VDDVARIO
+ * pins during suspend. */
+
+/* TODO: implement freeze/thaw callbacks for hibernation. */
+
+static int smsc911x_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct smsc911x_data *pdata = netdev_priv(ndev);
+
+ /* enable wake on LAN, energy detection and the external PME
+ * signal. */
+ smsc911x_reg_write(pdata, PMT_CTRL,
+ PMT_CTRL_PM_MODE_D1_ | PMT_CTRL_WOL_EN_ |
+ PMT_CTRL_ED_EN_ | PMT_CTRL_PME_EN_);
+
+ return 0;
+}
+
+static int smsc911x_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct smsc911x_data *pdata = netdev_priv(ndev);
+ unsigned int to = 100;
+
+ /* Note 3.11 from the datasheet:
+ * "When the LAN9220 is in a power saving state, a write of any
+ * data to the BYTE_TEST register will wake-up the device."
+ */
+ smsc911x_reg_write(pdata, BYTE_TEST, 0);
+
+ /* poll the READY bit in PMT_CTRL. Any other access to the device is
+ * forbidden while this bit isn't set. Try for 100ms and return -EIO
+ * if it failed. */
+ while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to)
+ udelay(1000);
+
+ return (to == 0) ?
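/* to == 0 here means the READY bit never rose within the ~100ms budget
 * (100 iterations of udelay(1000)), so resume fails with -EIO. */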
-EIO : 0; +} + +static const struct dev_pm_ops smsc911x_pm_ops = { + .suspend = smsc911x_suspend, + .resume = smsc911x_resume, +}; + +#define SMSC911X_PM_OPS (&smsc911x_pm_ops) + +#else +#define SMSC911X_PM_OPS NULL +#endif + +static const struct of_device_id smsc911x_dt_ids[] = { + { .compatible = "smsc,lan9115", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, smsc911x_dt_ids); + +static struct platform_driver smsc911x_driver = { + .probe = smsc911x_drv_probe, + .remove = __devexit_p(smsc911x_drv_remove), + .driver = { + .name = SMSC_CHIPNAME, + .owner = THIS_MODULE, + .pm = SMSC911X_PM_OPS, + .of_match_table = smsc911x_dt_ids, + }, +}; + +/* Entry point for loading the module */ +static int __init smsc911x_init_module(void) +{ + SMSC_INITIALIZE(); + return platform_driver_register(&smsc911x_driver); +} + +/* entry point for unloading the module */ +static void __exit smsc911x_cleanup_module(void) +{ + platform_driver_unregister(&smsc911x_driver); +} + +module_init(smsc911x_init_module); +module_exit(smsc911x_cleanup_module); diff --cc drivers/net/ppp/pptp.c index eae542a7e987,000000000000..89f829f5f725 mode 100644,000000..100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@@ -1,717 -1,0 +1,723 @@@ +/* + * Point-to-Point Tunneling Protocol for Linux + * + * Authors: Dmitry Kozlov + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +#define PPTP_DRIVER_VERSION "0.8.5" + +#define MAX_CALLID 65535 + +static DECLARE_BITMAP(callid_bitmap, MAX_CALLID + 1); +static struct pppox_sock **callid_sock; + +static DEFINE_SPINLOCK(chan_lock); + +static struct proto pptp_sk_proto __read_mostly; +static const struct ppp_channel_ops pptp_chan_ops; +static const struct proto_ops pptp_ops; + +#define PPP_LCP_ECHOREQ 0x09 +#define PPP_LCP_ECHOREP 0x0A +#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP) + +#define MISSING_WINDOW 20 +#define WRAPPED(curseq, lastseq)\ + ((((curseq) & 0xffffff00) == 0) &&\ + (((lastseq) & 0xffffff00) == 0xffffff00)) + +#define PPTP_GRE_PROTO 0x880B +#define PPTP_GRE_VER 0x1 + +#define PPTP_GRE_FLAG_C 0x80 +#define PPTP_GRE_FLAG_R 0x40 +#define PPTP_GRE_FLAG_K 0x20 +#define PPTP_GRE_FLAG_S 0x10 +#define PPTP_GRE_FLAG_A 0x80 + +#define PPTP_GRE_IS_C(f) ((f)&PPTP_GRE_FLAG_C) +#define PPTP_GRE_IS_R(f) ((f)&PPTP_GRE_FLAG_R) +#define PPTP_GRE_IS_K(f) ((f)&PPTP_GRE_FLAG_K) +#define PPTP_GRE_IS_S(f) ((f)&PPTP_GRE_FLAG_S) +#define PPTP_GRE_IS_A(f) ((f)&PPTP_GRE_FLAG_A) + +#define PPTP_HEADER_OVERHEAD (2+sizeof(struct pptp_gre_header)) +struct pptp_gre_header { + u8 flags; + u8 ver; + u16 protocol; + u16 payload_len; + u16 call_id; + u32 seq; + u32 ack; +} __packed; + +static struct pppox_sock *lookup_chan(u16 call_id, __be32 s_addr) +{ + struct pppox_sock *sock; + struct pptp_opt *opt; + + rcu_read_lock(); + sock = rcu_dereference(callid_sock[call_id]); + if (sock) { + opt = &sock->proto.pptp; + if (opt->dst_addr.sin_addr.s_addr != s_addr) + sock = NULL; + else + sock_hold(sk_pppox(sock)); + } + rcu_read_unlock(); + + return sock; +} + +static 
int lookup_chan_dst(u16 call_id, __be32 d_addr) +{ + struct pppox_sock *sock; + struct pptp_opt *opt; + int i; + + rcu_read_lock(); + for (i = find_next_bit(callid_bitmap, MAX_CALLID, 1); i < MAX_CALLID; + i = find_next_bit(callid_bitmap, MAX_CALLID, i + 1)) { + sock = rcu_dereference(callid_sock[i]); + if (!sock) + continue; + opt = &sock->proto.pptp; + if (opt->dst_addr.call_id == call_id && + opt->dst_addr.sin_addr.s_addr == d_addr) + break; + } + rcu_read_unlock(); + + return i < MAX_CALLID; +} + +static int add_chan(struct pppox_sock *sock) +{ + static int call_id; + + spin_lock(&chan_lock); + if (!sock->proto.pptp.src_addr.call_id) { + call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1); + if (call_id == MAX_CALLID) { + call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1); + if (call_id == MAX_CALLID) + goto out_err; + } + sock->proto.pptp.src_addr.call_id = call_id; + } else if (test_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap)) + goto out_err; + + set_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap); + rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], sock); + spin_unlock(&chan_lock); + + return 0; + +out_err: + spin_unlock(&chan_lock); + return -1; +} + +static void del_chan(struct pppox_sock *sock) +{ + spin_lock(&chan_lock); + clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap); + rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], NULL); + spin_unlock(&chan_lock); + synchronize_rcu(); +} + +static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb) +{ + struct sock *sk = (struct sock *) chan->private; + struct pppox_sock *po = pppox_sk(sk); + struct pptp_opt *opt = &po->proto.pptp; + struct pptp_gre_header *hdr; + unsigned int header_len = sizeof(*hdr); + struct flowi4 fl4; + int islcp; + int len; + unsigned char *data; + __u32 seq_recv; + + + struct rtable *rt; + struct net_device *tdev; + struct iphdr *iph; + int max_headroom; + + if (sk_pppox(po)->sk_state & PPPOX_DEAD) + goto tx_error; + + rt = ip_route_output_ports(&init_net, &fl4, NULL, + opt->dst_addr.sin_addr.s_addr, + opt->src_addr.sin_addr.s_addr, + 0, 0, IPPROTO_GRE, + RT_TOS(0), 0); + if (IS_ERR(rt)) + goto tx_error; + + tdev = rt->dst.dev; + + max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(*iph) + sizeof(*hdr) + 2; + + if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) { + struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); + if (!new_skb) { + ip_rt_put(rt); + goto tx_error; + } + if (skb->sk) + skb_set_owner_w(new_skb, skb->sk); + kfree_skb(skb); + skb = new_skb; + } + + data = skb->data; + islcp = ((data[0] << 8) + data[1]) == PPP_LCP && 1 <= data[2] && data[2] <= 7; + + /* compress protocol field */ + if ((opt->ppp_flags & SC_COMP_PROT) && data[0] == 0 && !islcp) + skb_pull(skb, 1); + + /* Put in the address/control bytes if necessary */ + if ((opt->ppp_flags & SC_COMP_AC) == 0 || islcp) { + data = skb_push(skb, 2); + data[0] = PPP_ALLSTATIONS; + data[1] = PPP_UI; + } + + len = skb->len; + + seq_recv = opt->seq_recv; + + if (opt->ack_sent == seq_recv) + header_len -= sizeof(hdr->ack); + + /* Push down and install GRE header */ + skb_push(skb, header_len); + hdr = (struct pptp_gre_header *)(skb->data); + + hdr->flags = PPTP_GRE_FLAG_K; + hdr->ver = PPTP_GRE_VER; + hdr->protocol = htons(PPTP_GRE_PROTO); + hdr->call_id = htons(opt->dst_addr.call_id); + + hdr->flags |= PPTP_GRE_FLAG_S; + hdr->seq = htonl(++opt->seq_sent); + if (opt->ack_sent != seq_recv) { + /* send ack with this message */ 
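/* The A (ack present) bit lives in the second byte of the enhanced GRE
 * header, which is why it is OR'ed into hdr->ver rather than hdr->flags.
 * Wire layout of struct pptp_gre_header, for reference:
 *
 *	byte 0: flags = K | S		(key and sequence number present)
 *	byte 1: ver   = 1 [| A]		(version, ack present)
 *	then:   protocol, payload_len, call_id, seq[, ack]
 *
 * When no ack piggybacks, header_len was already shrunk by
 * sizeof(hdr->ack) above, so the trailing ack word is not emitted. */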
+ hdr->ver |= PPTP_GRE_FLAG_A; + hdr->ack = htonl(seq_recv); + opt->ack_sent = seq_recv; + } + hdr->payload_len = htons(len); + + /* Push down and install the IP header. */ + + skb_reset_transport_header(skb); + skb_push(skb, sizeof(*iph)); + skb_reset_network_header(skb); + memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); + IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED); + + iph = ip_hdr(skb); + iph->version = 4; + iph->ihl = sizeof(struct iphdr) >> 2; + if (ip_dont_fragment(sk, &rt->dst)) + iph->frag_off = htons(IP_DF); + else + iph->frag_off = 0; + iph->protocol = IPPROTO_GRE; + iph->tos = 0; + iph->daddr = fl4.daddr; + iph->saddr = fl4.saddr; + iph->ttl = ip4_dst_hoplimit(&rt->dst); + iph->tot_len = htons(skb->len); + + skb_dst_drop(skb); + skb_dst_set(skb, &rt->dst); + + nf_reset(skb); + + skb->ip_summed = CHECKSUM_NONE; + ip_select_ident(iph, &rt->dst, NULL); + ip_send_check(iph); + + ip_local_out(skb); ++ return 1; + +tx_error: ++ kfree_skb(skb); + return 1; +} + +static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb) +{ + struct pppox_sock *po = pppox_sk(sk); + struct pptp_opt *opt = &po->proto.pptp; + int headersize, payload_len, seq; + __u8 *payload; + struct pptp_gre_header *header; + + if (!(sk->sk_state & PPPOX_CONNECTED)) { + if (sock_queue_rcv_skb(sk, skb)) + goto drop; + return NET_RX_SUCCESS; + } + + header = (struct pptp_gre_header *)(skb->data); ++ headersize = sizeof(*header); + + /* test if acknowledgement present */ + if (PPTP_GRE_IS_A(header->ver)) { - __u32 ack = (PPTP_GRE_IS_S(header->flags)) ? - header->ack : header->seq; /* ack in different place if S = 0 */ ++ __u32 ack; ++ ++ if (!pskb_may_pull(skb, headersize)) ++ goto drop; ++ header = (struct pptp_gre_header *)(skb->data); ++ ++ /* ack in different place if S = 0 */ ++ ack = PPTP_GRE_IS_S(header->flags) ? header->ack : header->seq; + + ack = ntohl(ack); + + if (ack > opt->ack_recv) + opt->ack_recv = ack; + /* also handle sequence number wrap-around */ + if (WRAPPED(ack, opt->ack_recv)) + opt->ack_recv = ack; ++ } else { ++ headersize -= sizeof(header->ack); + } - + /* test if payload present */ + if (!PPTP_GRE_IS_S(header->flags)) + goto drop; + - headersize = sizeof(*header); + payload_len = ntohs(header->payload_len); + seq = ntohl(header->seq); + - /* no ack present? 
*/
- if (!PPTP_GRE_IS_A(header->ver))
- headersize -= sizeof(header->ack);
+ /* check for incomplete packet (length smaller than expected) */
- if (skb->len - headersize < payload_len)
++ if (!pskb_may_pull(skb, headersize + payload_len))
+ goto drop;
+
+ payload = skb->data + headersize;
+ /* check for expected sequence number */
+ if (seq < opt->seq_recv + 1 || WRAPPED(opt->seq_recv, seq)) {
+ if ((payload[0] == PPP_ALLSTATIONS) && (payload[1] == PPP_UI) &&
+ (PPP_PROTOCOL(payload) == PPP_LCP) &&
+ ((payload[4] == PPP_LCP_ECHOREQ) || (payload[4] == PPP_LCP_ECHOREP)))
+ goto allow_packet;
+ } else {
+ opt->seq_recv = seq;
+allow_packet:
+ skb_pull(skb, headersize);
+
+ if (payload[0] == PPP_ALLSTATIONS && payload[1] == PPP_UI) {
+ /* chop off address/control */
+ if (skb->len < 3)
+ goto drop;
+ skb_pull(skb, 2);
+ }
+
+ if ((*skb->data) & 1) {
+ /* protocol is compressed */
+ skb_push(skb, 1)[0] = 0;
+ }
+
+ skb->ip_summed = CHECKSUM_NONE;
+ skb_set_network_header(skb, skb->head-skb->data);
+ ppp_input(&po->chan, skb);
+
+ return NET_RX_SUCCESS;
+ }
+drop:
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
+
+static int pptp_rcv(struct sk_buff *skb)
+{
+ struct pppox_sock *po;
+ struct pptp_gre_header *header;
+ struct iphdr *iph;
+
+ if (skb->pkt_type != PACKET_HOST)
+ goto drop;
+
+ if (!pskb_may_pull(skb, 12))
+ goto drop;
+
+ iph = ip_hdr(skb);
+
+ header = (struct pptp_gre_header *)skb->data;
+
+ if (ntohs(header->protocol) != PPTP_GRE_PROTO || /* PPTP-GRE protocol for PPTP */
+ PPTP_GRE_IS_C(header->flags) || /* flag C should be clear */
+ PPTP_GRE_IS_R(header->flags) || /* flag R should be clear */
+ !PPTP_GRE_IS_K(header->flags) || /* flag K should be set */
+ (header->flags&0xF) != 0) /* routing and recursion ctrl = 0 */
+ /* if invalid, discard this packet */
+ goto drop;
+
+ po = lookup_chan(htons(header->call_id), iph->saddr);
+ if (po) {
+ skb_dst_drop(skb);
+ nf_reset(skb);
+ return sk_receive_skb(sk_pppox(po), skb, 0);
+ }
+drop:
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
+
+static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
+ int sockaddr_len)
+{
+ struct sock *sk = sock->sk;
+ struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
+ struct pppox_sock *po = pppox_sk(sk);
+ struct pptp_opt *opt = &po->proto.pptp;
+ int error = 0;
+
+ lock_sock(sk);
+
+ opt->src_addr = sp->sa_addr.pptp;
+ /* on failure fall through to the single release_sock() below */
+ if (add_chan(po))
+ error = -EBUSY;
+
+ release_sock(sk);
+ return error;
+}
+
+static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ int sockaddr_len, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
+ struct pppox_sock *po = pppox_sk(sk);
+ struct pptp_opt *opt = &po->proto.pptp;
+ struct rtable *rt;
+ struct flowi4 fl4;
+ int error = 0;
+
+ if (sp->sa_protocol != PX_PROTO_PPTP)
+ return -EINVAL;
+
+ if (lookup_chan_dst(sp->sa_addr.pptp.call_id, sp->sa_addr.pptp.sin_addr.s_addr))
+ return -EALREADY;
+
+ lock_sock(sk);
+ /* Check for already bound sockets */
+ if (sk->sk_state & PPPOX_CONNECTED) {
+ error = -EBUSY;
+ goto end;
+ }
+
+ /* Check for already disconnected sockets, on attempts to disconnect */
+ if (sk->sk_state & PPPOX_DEAD) {
+ error = -EALREADY;
+ goto end;
+ }
+
+ if (!opt->src_addr.sin_addr.s_addr || !sp->sa_addr.pptp.sin_addr.s_addr) {
+ error = -EINVAL;
+ goto end;
+ }
+
+ po->chan.private = sk;
+ po->chan.ops = &pptp_chan_ops;
+
+ rt = ip_route_output_ports(&init_net, &fl4, sk,
+ opt->dst_addr.sin_addr.s_addr,
+
opt->src_addr.sin_addr.s_addr, + 0, 0, + IPPROTO_GRE, RT_CONN_FLAGS(sk), 0); + if (IS_ERR(rt)) { + error = -EHOSTUNREACH; + goto end; + } + sk_setup_caps(sk, &rt->dst); + + po->chan.mtu = dst_mtu(&rt->dst); + if (!po->chan.mtu) + po->chan.mtu = PPP_MTU; + ip_rt_put(rt); + po->chan.mtu -= PPTP_HEADER_OVERHEAD; + + po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header); + error = ppp_register_channel(&po->chan); + if (error) { + pr_err("PPTP: failed to register PPP channel (%d)\n", error); + goto end; + } + + opt->dst_addr = sp->sa_addr.pptp; + sk->sk_state = PPPOX_CONNECTED; + + end: + release_sock(sk); + return error; +} + +static int pptp_getname(struct socket *sock, struct sockaddr *uaddr, + int *usockaddr_len, int peer) +{ + int len = sizeof(struct sockaddr_pppox); + struct sockaddr_pppox sp; + + sp.sa_family = AF_PPPOX; + sp.sa_protocol = PX_PROTO_PPTP; + sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr; + + memcpy(uaddr, &sp, len); + + *usockaddr_len = len; + + return 0; +} + +static int pptp_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + struct pppox_sock *po; + struct pptp_opt *opt; + int error = 0; + + if (!sk) + return 0; + + lock_sock(sk); + + if (sock_flag(sk, SOCK_DEAD)) { + release_sock(sk); + return -EBADF; + } + + po = pppox_sk(sk); + opt = &po->proto.pptp; + del_chan(po); + + pppox_unbind_sock(sk); + sk->sk_state = PPPOX_DEAD; + + sock_orphan(sk); + sock->sk = NULL; + + release_sock(sk); + sock_put(sk); + + return error; +} + +static void pptp_sock_destruct(struct sock *sk) +{ + if (!(sk->sk_state & PPPOX_DEAD)) { + del_chan(pppox_sk(sk)); + pppox_unbind_sock(sk); + } + skb_queue_purge(&sk->sk_receive_queue); +} + +static int pptp_create(struct net *net, struct socket *sock) +{ + int error = -ENOMEM; + struct sock *sk; + struct pppox_sock *po; + struct pptp_opt *opt; + + sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pptp_sk_proto); + if (!sk) + goto out; + + sock_init_data(sock, sk); + + sock->state = SS_UNCONNECTED; + sock->ops = &pptp_ops; + + sk->sk_backlog_rcv = pptp_rcv_core; + sk->sk_state = PPPOX_NONE; + sk->sk_type = SOCK_STREAM; + sk->sk_family = PF_PPPOX; + sk->sk_protocol = PX_PROTO_PPTP; + sk->sk_destruct = pptp_sock_destruct; + + po = pppox_sk(sk); + opt = &po->proto.pptp; + + opt->seq_sent = 0; opt->seq_recv = 0; + opt->ack_recv = 0; opt->ack_sent = 0; + + error = 0; +out: + return error; +} + +static int pptp_ppp_ioctl(struct ppp_channel *chan, unsigned int cmd, + unsigned long arg) +{ + struct sock *sk = (struct sock *) chan->private; + struct pppox_sock *po = pppox_sk(sk); + struct pptp_opt *opt = &po->proto.pptp; + void __user *argp = (void __user *)arg; + int __user *p = argp; + int err, val; + + err = -EFAULT; + switch (cmd) { + case PPPIOCGFLAGS: + val = opt->ppp_flags; + if (put_user(val, p)) + break; + err = 0; + break; + case PPPIOCSFLAGS: + if (get_user(val, p)) + break; + opt->ppp_flags = val & ~SC_RCV_BITS; + err = 0; + break; + default: + err = -ENOTTY; + } + + return err; +} + +static const struct ppp_channel_ops pptp_chan_ops = { + .start_xmit = pptp_xmit, + .ioctl = pptp_ppp_ioctl, +}; + +static struct proto pptp_sk_proto __read_mostly = { + .name = "PPTP", + .owner = THIS_MODULE, + .obj_size = sizeof(struct pppox_sock), +}; + +static const struct proto_ops pptp_ops = { + .family = AF_PPPOX, + .owner = THIS_MODULE, + .release = pptp_release, + .bind = pptp_bind, + .connect = pptp_connect, + .socketpair = sock_no_socketpair, + .accept = sock_no_accept, + .getname = pptp_getname, + .poll = sock_no_poll, + .listen = 
sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .setsockopt = sock_no_setsockopt,
+ .getsockopt = sock_no_getsockopt,
+ .sendmsg = sock_no_sendmsg,
+ .recvmsg = sock_no_recvmsg,
+ .mmap = sock_no_mmap,
+ .ioctl = pppox_ioctl,
+};
+
+static const struct pppox_proto pppox_pptp_proto = {
+ .create = pptp_create,
+ .owner = THIS_MODULE,
+};
+
+static const struct gre_protocol gre_pptp_protocol = {
+ .handler = pptp_rcv,
+};
+
+static int __init pptp_init_module(void)
+{
+ int err = 0;
+ pr_info("PPTP driver version " PPTP_DRIVER_VERSION "\n");
+
+ callid_sock = vzalloc((MAX_CALLID + 1) * sizeof(void *));
+ if (!callid_sock) {
+ pr_err("PPTP: can't allocate memory\n");
+ return -ENOMEM;
+ }
+
+ err = gre_add_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
+ if (err) {
+ pr_err("PPTP: can't add gre protocol\n");
+ goto out_mem_free;
+ }
+
+ err = proto_register(&pptp_sk_proto, 0);
+ if (err) {
+ pr_err("PPTP: can't register sk_proto\n");
+ goto out_gre_del_protocol;
+ }
+
+ err = register_pppox_proto(PX_PROTO_PPTP, &pppox_pptp_proto);
+ if (err) {
+ pr_err("PPTP: can't register pppox_proto\n");
+ goto out_unregister_sk_proto;
+ }
+
+ return 0;
+
+out_unregister_sk_proto:
+ proto_unregister(&pptp_sk_proto);
+out_gre_del_protocol:
+ gre_del_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
+out_mem_free:
+ vfree(callid_sock);
+
+ return err;
+}
+
+static void __exit pptp_exit_module(void)
+{
+ unregister_pppox_proto(PX_PROTO_PPTP);
+ proto_unregister(&pptp_sk_proto);
+ gre_del_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
+ vfree(callid_sock);
+}
+
+module_init(pptp_init_module);
+module_exit(pptp_exit_module);
+
+MODULE_DESCRIPTION("Point-to-Point Tunneling Protocol");
+MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
+MODULE_LICENSE("GPL");
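/*
 * For context, a userspace client opens the PPTP data channel roughly as
 * sketched below; a real client (e.g. pppd's pptp plugin) also runs the
 * TCP port-1723 control session and fills in the peer's call id before
 * connecting.  The helper name and error handling here are illustrative,
 * not part of this driver.
 */
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_pppox.h>

static int pptp_open_channel(const struct sockaddr_pppox *src,
			     const struct sockaddr_pppox *dst)
{
	/* PX_PROTO_PPTP routes socket creation to pptp_create() above */
	int fd = socket(AF_PPPOX, SOCK_STREAM, PX_PROTO_PPTP);

	if (fd < 0)
		return -1;

	/* bind() claims a local call id via add_chan() */
	if (bind(fd, (const struct sockaddr *)src, sizeof(*src)) < 0 ||
	    connect(fd, (const struct sockaddr *)dst, sizeof(*dst)) < 0) {
		close(fd);
		return -1;
	}

	/* the fd is then attached to a ppp unit: PPPIOCGCHAN here (handled
	 * by pppox_ioctl above), then PPPIOCATTCHAN on /dev/ppp (handled by
	 * the generic ppp driver) */
	return fd;
}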