vector = dev->eq_table[vector % ibdev->num_comp_vectors];
err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
- cq->db.dma, &cq->mcq, vector, 0);
+ cq->db.dma, &cq->mcq, vector, 0, 0);
if (err)
goto err_dbmap;
obj-$(CONFIG_MLX4_EN) += mlx4_en.o
mlx4_en-y := en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \
- en_resources.o en_netdev.o en_selftest.o
+ en_resources.o en_netdev.o en_selftest.o en_clock.o
mlx4_en-$(CONFIG_MLX4_EN_DCB) += en_dcb_nl.o
__mlx4_cq_free_icm(dev, cqn);
}
-int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
- struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
- unsigned vector, int collapsed)
+int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
+ struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec,
+ struct mlx4_cq *cq, unsigned vector, int collapsed,
+ int timestamp_en)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cq_table *cq_table = &priv->cq_table;
memset(cq_context, 0, sizeof *cq_context);
cq_context->flags = cpu_to_be32(!!collapsed << 18);
+	/* Bit 19 of the CQ context flags enables HW timestamping of
+	 * completions on this CQ.
+	 */
+	if (timestamp_en)
+		cq_context->flags |= cpu_to_be32(1 << 19);
+
cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
cq_context->comp_eqn = priv->eq_table.eq[vector].eqn;
cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+/*
+ * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/mlx4/device.h>
+
+#include "mlx4_en.h"
+
+int mlx4_en_timestamp_config(struct net_device *dev, int tx_type, int rx_filter)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int port_up = 0;
+ int err = 0;
+
+ mutex_lock(&mdev->state_lock);
+ if (priv->port_up) {
+ port_up = 1;
+ mlx4_en_stop_port(dev, 1);
+ }
+
+ mlx4_en_free_resources(priv);
+
+ en_warn(priv, "Changing Time Stamp configuration\n");
+
+ priv->hwtstamp_config.tx_type = tx_type;
+ priv->hwtstamp_config.rx_filter = rx_filter;
+
+	/* RX time stamping and HW VLAN stripping are mutually exclusive:
+	 * the CQE bytes that normally carry the stripped VLAN tag are
+	 * reused for the timestamp (see struct mlx4_ts_cqe below).
+	 */
+	if (rx_filter != HWTSTAMP_FILTER_NONE)
+		dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+	else
+		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+
+ err = mlx4_en_alloc_resources(priv);
+ if (err) {
+ en_err(priv, "Failed reallocating port resources\n");
+ goto out;
+ }
+ if (port_up) {
+ err = mlx4_en_start_port(dev);
+ if (err)
+ en_err(priv, "Failed starting port\n");
+ }
+
+out:
+ mutex_unlock(&mdev->state_lock);
+ netdev_features_change(dev);
+ return err;
+}
+
+/* mlx4_en_read_clock - read raw cycle counter (to be used by time counter)
+ */
+static cycle_t mlx4_en_read_clock(const struct cyclecounter *tc)
+{
+ struct mlx4_en_dev *mdev =
+ container_of(tc, struct mlx4_en_dev, cycles);
+ struct mlx4_dev *dev = mdev->dev;
+
+ return mlx4_read_clock(dev) & tc->mask;
+}
+
+u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe)
+{
+ u64 hi, lo;
+ struct mlx4_ts_cqe *ts_cqe = (struct mlx4_ts_cqe *)cqe;
+
+ lo = (u64)be16_to_cpu(ts_cqe->timestamp_lo);
+ hi = ((u64)be32_to_cpu(ts_cqe->timestamp_hi) + !lo) << 16;
+
+ return hi | lo;
+}
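
The CQE timestamp is 48 bits wide: bits 47:16 arrive in timestamp_hi and bits 15:0 in timestamp_lo. The "+ !lo" term bumps the high part by one when the low word reads back as zero, compensating for the low word having wrapped between the two hardware latch points. A minimal host-side sketch of the same arithmetic (standalone C; the values are illustrative, not from real hardware):

#include <stdint.h>
#include <stdio.h>

/* Same assembly as mlx4_en_get_cqe_ts, on plain host integers. */
static uint64_t assemble_ts(uint32_t hi32, uint16_t lo16)
{
        uint64_t lo = lo16;
        uint64_t hi = ((uint64_t)hi32 + !lo) << 16;

        return hi | lo;
}

int main(void)
{
        /* Normal case: hi and lo simply concatenate. */
        printf("0x%llx\n", (unsigned long long)assemble_ts(0x1, 0xabcd)); /* 0x1abcd */
        /* Low word wrapped to zero: hi is carried up by one. */
        printf("0x%llx\n", (unsigned long long)assemble_ts(0x1, 0x0));    /* 0x20000 */
        return 0;
}
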
+
+void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
+ struct skb_shared_hwtstamps *hwts,
+ u64 timestamp)
+{
+ u64 nsec;
+
+ nsec = timecounter_cyc2time(&mdev->clock, timestamp);
+
+ memset(hwts, 0, sizeof(struct skb_shared_hwtstamps));
+ hwts->hwtstamp = ns_to_ktime(nsec);
+}
+
+void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
+{
+ struct mlx4_dev *dev = mdev->dev;
+
+ memset(&mdev->cycles, 0, sizeof(mdev->cycles));
+ mdev->cycles.read = mlx4_en_read_clock;
+ mdev->cycles.mask = CLOCKSOURCE_MASK(48);
+	/* Use a shift to make the calculation more accurate. Since the
+	 * current HW clock frequency is 427 MHz and cycles are reported in
+	 * a 48-bit register, the biggest shift that keeps the u64 math from
+	 * overflowing is 14 (max_cycles * multiplier < 2^64).
+	 */
+ mdev->cycles.shift = 14;
+ mdev->cycles.mult =
+ clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
+
+ timecounter_init(&mdev->clock, &mdev->cycles,
+ ktime_to_ns(ktime_get_real()));
+}
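
The cyclecounter converts raw cycles to nanoseconds as ns = (cycles * mult) >> shift, and clocksource_khz2mult() derives mult from the clock rate in kHz (here 1000 * hca_core_clock, with hca_core_clock in MHz). For 427 MHz and shift = 14 that gives mult around 38370, and 2^48 * 38370 still fits in a u64, while shift = 15 would overflow -- which is the bound the comment above refers to. A quick standalone check, assuming the 427 MHz figure from that comment:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint64_t khz = 427000;                 /* 427 MHz core clock */
        const uint64_t max_cycles = (1ULL << 48) - 1; /* 48-bit counter */

        for (int shift = 10; shift <= 16; shift++) {
                /* Same rounding as clocksource_khz2mult(). */
                uint64_t mult = ((1000000ULL << shift) + khz / 2) / khz;
                /* Overflow-safe test of max_cycles * mult < 2^64. */
                int fits = mult != 0 && max_cycles <= UINT64_MAX / mult;

                printf("shift=%d mult=%llu %s\n", shift,
                       (unsigned long long)mult, fits ? "fits" : "overflows");
        }
        return 0;
}
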
struct mlx4_en_dev *mdev = priv->mdev;
int err = 0;
char name[25];
+ int timestamp_en = 0;
struct cpu_rmap *rmap =
#ifdef CONFIG_RFS_ACCEL
priv->dev->rx_cpu_rmap;
if (!cq->is_tx)
cq->size = priv->rx_ring[cq->ring].actual_size;
- err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
- cq->wqres.db.dma, &cq->mcq, cq->vector, 0);
+ if ((cq->is_tx && priv->hwtstamp_config.tx_type) ||
+ (!cq->is_tx && priv->hwtstamp_config.rx_filter))
+ timestamp_en = 1;
+
+ err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt,
+ &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
+ cq->vector, 0, timestamp_en);
if (err)
return err;
return err;
}
+static int mlx4_en_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int ret;
+
+ ret = ethtool_op_get_ts_info(dev, info);
+ if (ret)
+ return ret;
+
+ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
+ info->so_timestamping |=
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->tx_types =
+ (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters =
+ (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+ }
+
+ return ret;
+}
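
With this hook registered in mlx4_en_ethtool_ops below, the advertised capabilities become visible to userspace through the ETHTOOL_GET_TS_INFO command, e.g. via ethtool -T <iface>.
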
+
const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_drvinfo = mlx4_en_get_drvinfo,
.get_settings = mlx4_en_get_settings,
.set_rxfh_indir = mlx4_en_set_rxfh_indir,
.get_channels = mlx4_en_get_channels,
.set_channels = mlx4_en_set_channels,
+ .get_ts_info = mlx4_en_get_ts_info,
};
if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
mdev->pndev[i] = NULL;
}
+
+ /* Initialize time stamp mechanism */
+ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
+ mlx4_en_init_timestamp(mdev);
+
return mdev;
err_mr:
return 0;
}
+static int mlx4_en_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct hwtstamp_config config;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ /* device doesn't support time stamping */
+ if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
+ return -EINVAL;
+
+ /* TX HW timestamp */
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ /* RX HW timestamp */
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (mlx4_en_timestamp_config(dev, config.tx_type, config.rx_filter)) {
+ config.tx_type = HWTSTAMP_TX_OFF;
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+ }
+
+ return copy_to_user(ifr->ifr_data, &config,
+ sizeof(config)) ? -EFAULT : 0;
+}
+
+static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return mlx4_en_hwtstamp_ioctl(dev, ifr);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
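
For reference, a userspace request that exercises this handler could look like the following sketch (the interface name and error handling are illustrative; the kernel writes the possibly downgraded config back into the same struct):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int main(void)
{
        struct hwtstamp_config cfg;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&cfg, 0, sizeof(cfg));
        cfg.tx_type = HWTSTAMP_TX_ON;
        cfg.rx_filter = HWTSTAMP_FILTER_ALL;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&cfg;

        if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
                perror("SIOCSHWTSTAMP");
        else
                /* The driver may have downgraded the request. */
                printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
        close(fd);
        return 0;
}
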
+
static int mlx4_en_set_features(struct net_device *netdev,
netdev_features_t features)
{
.ndo_set_mac_address = mlx4_en_set_mac,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = mlx4_en_change_mtu,
+ .ndo_do_ioctl = mlx4_en_ioctl,
.ndo_tx_timeout = mlx4_en_tx_timeout,
.ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
spin_lock_init(&priv->filters_lock);
#endif
+ /* Initialize time stamping config */
+ priv->hwtstamp_config.flags = 0;
+ priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
+ priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+
/* Allocate page for receive rings */
err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
int user_prio, struct mlx4_qp_context *context)
{
struct mlx4_en_dev *mdev = priv->mdev;
+ struct net_device *dev = priv->dev;
memset(context, 0, sizeof *context);
context->flags = cpu_to_be32(7 << 16 | rss << MLX4_RSS_QPC_FLAG_OFFSET);
context->cqn_send = cpu_to_be32(cqn);
context->cqn_recv = cpu_to_be32(cqn);
context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
+	/* Bit 30 of param3 presumably disables HW VLAN stripping for this
+	 * QP; it is set whenever VLAN RX offload is off (e.g. while RX time
+	 * stamping is enabled).
+	 */
+	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX))
+		context->param3 |= cpu_to_be32(1 << 30);
}
}
ring->buf = ring->wqres.buf.direct.buf;
+ ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;
+
return 0;
err_hwq:
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_cqe *cqe;
struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
struct mlx4_en_rx_alloc *frags;
int polled = 0;
int ip_summed;
int factor = priv->cqe_factor;
+ u64 timestamp;
if (!priv->port_up)
return 0;
gro_skb->data_len = length;
gro_skb->ip_summed = CHECKSUM_UNNECESSARY;
- if (cqe->vlan_my_qpn &
- cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) {
+ if ((cqe->vlan_my_qpn &
+ cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
+ (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
u16 vid = be16_to_cpu(cqe->sl_vid);
__vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
gro_skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid);
skb_record_rx_queue(gro_skb, cq->ring);
- napi_gro_frags(&cq->napi);
+ if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
+ timestamp = mlx4_en_get_cqe_ts(cqe);
+ mlx4_en_fill_hwtstamps(mdev,
+ skb_hwtstamps(gro_skb),
+ timestamp);
+ }
+
+ napi_gro_frags(&cq->napi);
goto next;
}
if (dev->features & NETIF_F_RXHASH)
skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid);
- if (be32_to_cpu(cqe->vlan_my_qpn) &
- MLX4_CQE_VLAN_PRESENT_MASK)
+ if ((be32_to_cpu(cqe->vlan_my_qpn) &
+ MLX4_CQE_VLAN_PRESENT_MASK) &&
+ (dev->features & NETIF_F_HW_VLAN_CTAG_RX))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid));
+ if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
+ timestamp = mlx4_en_get_cqe_ts(cqe);
+ mlx4_en_fill_hwtstamps(mdev, skb_hwtstamps(skb),
+ timestamp);
+ }
+
/* Push it up the stack */
netif_receive_skb(skb);
} else
ring->bf_enabled = true;
+ ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
+
return 0;
err_map:
static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
- int index, u8 owner)
+ int index, u8 owner, u64 timestamp)
{
+ struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
int i;
__be32 *ptr = (__be32 *)tx_desc;
__be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
+ struct skb_shared_hwtstamps hwts;
+
+ if (timestamp) {
+ mlx4_en_fill_hwtstamps(mdev, &hwts, timestamp);
+ skb_tstamp_tx(skb, &hwts);
+ }
/* Optimize the common case when there are no wraparounds */
if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
while (ring->cons != ring->prod) {
ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
ring->cons & ring->size_mask,
- !!(ring->cons & ring->size));
+ !!(ring->cons & ring->size), 0);
ring->cons += ring->last_nr_txbb;
cnt++;
}
u32 packets = 0;
u32 bytes = 0;
int factor = priv->cqe_factor;
+ u64 timestamp = 0;
if (!priv->port_up)
return;
do {
txbbs_skipped += ring->last_nr_txbb;
ring_index = (ring_index + ring->last_nr_txbb) & size_mask;
+ if (ring->tx_info[ring_index].ts_requested)
+ timestamp = mlx4_en_get_cqe_ts(cqe);
+
/* free next descriptor */
ring->last_nr_txbb = mlx4_en_free_tx_desc(
priv, ring, ring_index,
!!((ring->cons + txbbs_skipped) &
- ring->size));
+ ring->size), timestamp);
packets++;
bytes += ring->tx_info[ring_index].nr_bytes;
} while (ring_index != new_index);
tx_info->skb = skb;
tx_info->nr_txbb = nr_txbb;
+	/* For timestamping, set the in-progress flag in skb_shinfo and
+	 * record the request in tx_info for the completion path.
+	 */
+ if (ring->hwtstamp_tx_type == HWTSTAMP_TX_ON &&
+ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ tx_info->ts_requested = 1;
+ }
+
/* Prepare ctrl segment apart from opcode+ownership, which depends
* on whether LSO is used */
tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
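
SKBTX_HW_TSTAMP is set by the core stack only when the sending socket asked for hardware TX timestamps; the completed timestamp is then delivered on the socket error queue. A minimal sketch of the userspace side (socket setup and MSG_ERRQUEUE parsing omitted):

#include <sys/socket.h>
#include <linux/net_tstamp.h>

/* Enable HW TX timestamping on an already-connected socket fd;
 * completed timestamps are later read via recvmsg(fd, ..., MSG_ERRQUEUE).
 */
static int enable_tx_hw_tstamp(int fd)
{
        int flags = SOF_TIMESTAMPING_TX_HARDWARE |
                    SOF_TIMESTAMPING_RAW_HARDWARE;

        return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
                          &flags, sizeof(flags));
}
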
io_mapping_free(mlx4_priv(dev)->bf_mapping);
}
+/* Read the free-running 64-bit HW clock, exposed as two big-endian 32-bit
+ * MMIO words (hence the swab32 after readl). Read hi/lo/hi and retry if
+ * the high word changed in between, so a low-word wrap cannot produce a
+ * torn value.
+ */
+cycle_t mlx4_read_clock(struct mlx4_dev *dev)
+{
+	u32 clockhi, clocklo, clockhi1;
+	cycle_t cycles;
+	int i;
+	struct mlx4_priv *priv = mlx4_priv(dev);
+
+	for (i = 0; i < 10; i++) {
+ clockhi = swab32(readl(priv->clock_mapping));
+ clocklo = swab32(readl(priv->clock_mapping + 4));
+ clockhi1 = swab32(readl(priv->clock_mapping));
+ if (clockhi == clockhi1)
+ break;
+ }
+
+ cycles = (u64) clockhi << 32 | (u64) clocklo;
+
+ return cycles;
+}
+EXPORT_SYMBOL_GPL(mlx4_read_clock);
+
static int map_internal_clock(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
+#include <linux/net_tstamp.h>
#ifdef CONFIG_MLX4_EN_DCB
#include <linux/dcbnl.h>
#endif
u8 linear;
u8 data_offset;
u8 inl;
+ u8 ts_requested;
};
struct mlx4_bf bf;
bool bf_enabled;
struct netdev_queue *tx_queue;
+ int hwtstamp_tx_type;
};
struct mlx4_en_rx_desc {
unsigned long packets;
unsigned long csum_ok;
unsigned long csum_none;
+ int hwtstamp_rx_filter;
};
struct mlx4_en_cq {
u32 priv_pdn;
spinlock_t uar_lock;
u8 mac_removed[MLX4_MAX_PORTS + 1];
+ struct cyclecounter cycles;
+ struct timecounter clock;
+ unsigned long last_overflow_check;
};
struct device *ddev;
int base_tx_qpn;
struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];
+ struct hwtstamp_config hwtstamp_config;
#ifdef CONFIG_MLX4_EN_DCB
struct ieee_ets ets;
u64 mlx4_en_mac_to_u64(u8 *addr);
/*
- * Globals
+ * Functions for time stamping
+ */
+u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe);
+void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
+ struct skb_shared_hwtstamps *hwts,
+ u64 timestamp);
+void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev);
+int mlx4_en_timestamp_config(struct net_device *dev,
+ int tx_type,
+ int rx_filter);
+
+/* Globals
*/
extern const struct ethtool_ops mlx4_en_ethtool_ops;
u8 owner_sr_opcode;
};
+struct mlx4_ts_cqe {
+ __be32 vlan_my_qpn;
+ __be32 immed_rss_invalid;
+ __be32 g_mlpath_rqpn;
+ __be32 timestamp_hi;
+ __be16 status;
+ u8 ipv6_ext_mask;
+ u8 badfcs_enc;
+ __be32 byte_cnt;
+ __be16 wqe_index;
+ __be16 checksum;
+ u8 reserved;
+ __be16 timestamp_lo;
+ u8 owner_sr_opcode;
+} __packed;
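
struct mlx4_ts_cqe reinterprets the regular 32-byte CQE: the sl_vid/rlid words become timestamp_hi and two of the three reserved bytes become timestamp_lo. This overlay is also why HW VLAN stripping is turned off while RX time stamping is active. A hypothetical compile-time guard (not part of the patch) could pin the two layouts together, placed in any function built with the driver:

/* Hypothetical guard: the timestamp view must stay the same size as the
 * CQE it reinterprets (both 32 bytes).
 */
BUILD_BUG_ON(sizeof(struct mlx4_ts_cqe) != sizeof(struct mlx4_cqe));
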
+
enum {
MLX4_CQE_VLAN_PRESENT_MASK = 1 << 29,
MLX4_CQE_QPN_MASK = 0xffffff,
#include <linux/atomic.h>
+#include <linux/clocksource.h>
+
#define MAX_MSIX_P_PORT 17
#define MAX_MSIX 64
#define MSIX_LEGACY_SZ 4
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
- unsigned vector, int collapsed);
+ unsigned vector, int collapsed, int timestamp_en);
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base);
void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid);
__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave);
+cycle_t mlx4_read_clock(struct mlx4_dev *dev);
+
#endif /* MLX4_DEVICE_H */