* and populates a struct efx_rx_buffer with the relevant
* information. Return a negative error code or 0 on success.
*/
-static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *rx_buf)
+static int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf)
{
struct efx_nic *efx = rx_queue->efx;
struct net_device *net_dev = efx->net_dev;
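
For readers unfamiliar with the driver, a minimal sketch of what an skb-backed RX buffer setup under the comment's contract typically does: allocate an skb, record the data pointer and length, and map the buffer for DMA. This is not the patched function's body; fields not visible in this hunk (rx_buffer_len, dma_addr) are assumptions.

/* Illustrative sketch, not the patched function. */
static int example_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
                                      struct efx_rx_buffer *rx_buf)
{
        struct efx_nic *efx = rx_queue->efx;
        struct net_device *net_dev = efx->net_dev;
        unsigned int buf_len = efx->rx_buffer_len;      /* assumed field */

        rx_buf->skb = netdev_alloc_skb(net_dev, buf_len);
        if (unlikely(!rx_buf->skb))
                return -ENOMEM;

        /* Record where the packet data lives and map it for the NIC. */
        rx_buf->data = rx_buf->skb->data;
        rx_buf->len = buf_len;
        rx_buf->dma_addr = pci_map_single(efx->pci_dev, rx_buf->data,
                                          rx_buf->len, PCI_DMA_FROMDEVICE);
        return 0;
}
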
* and populates a struct efx_rx_buffer with the relevant
* information. Return a negative error code or 0 on success.
*/
-static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *rx_buf)
+static int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf)
{
struct efx_nic *efx = rx_queue->efx;
int bytes, space, offset;
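
The page-based variant follows the same return convention but carves receive buffers out of a (possibly multi-order) page, as the rx_buffer_order usage suggests. A hedged sketch, with the buffer-reuse and offset bookkeeping omitted and rx_buffer_len assumed:

/* Illustrative sketch, not the patched function. */
static int example_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
                                       struct efx_rx_buffer *rx_buf)
{
        struct efx_nic *efx = rx_queue->efx;

        rx_buf->skb = NULL;
        rx_buf->page = alloc_pages(__GFP_COLD | GFP_ATOMIC,
                                   efx->rx_buffer_order);
        if (unlikely(!rx_buf->page))
                return -ENOMEM;

        /* Map the whole (possibly compound) page for DMA... */
        rx_buf->dma_addr = pci_map_page(efx->pci_dev, rx_buf->page, 0,
                                        PAGE_SIZE << efx->rx_buffer_order,
                                        PCI_DMA_FROMDEVICE);
        /* ...and point the buffer at the start of it. */
        rx_buf->data = page_address(rx_buf->page);
        rx_buf->len = efx->rx_buffer_len;               /* assumed field */
        return 0;
}
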
* and populates a struct efx_rx_buffer with the relevant
* information.
*/
-static inline int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *new_rx_buf)
+static int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *new_rx_buf)
{
int rc = 0;
return rc;
}
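
A sketch of the dispatcher shape implied by the two helpers above; use_page_allocation() is a placeholder predicate, not the driver's actual policy test:

/* Illustrative sketch; use_page_allocation() is a placeholder. */
static int example_init_rx_buffer(struct efx_rx_queue *rx_queue,
                                  struct efx_rx_buffer *new_rx_buf)
{
        int rc = 0;

        if (use_page_allocation(rx_queue))
                rc = example_init_rx_buffer_page(rx_queue, new_rx_buf);
        else
                rc = example_init_rx_buffer_skb(rx_queue, new_rx_buf);
        return rc;
}
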
-static inline void efx_unmap_rx_buffer(struct efx_nic *efx,
- struct efx_rx_buffer *rx_buf)
+static void efx_unmap_rx_buffer(struct efx_nic *efx,
+ struct efx_rx_buffer *rx_buf)
{
if (rx_buf->page) {
EFX_BUG_ON_PARANOID(rx_buf->skb);
}
}
-static inline void efx_free_rx_buffer(struct efx_nic *efx,
- struct efx_rx_buffer *rx_buf)
+static void efx_free_rx_buffer(struct efx_nic *efx,
+ struct efx_rx_buffer *rx_buf)
{
if (rx_buf->page) {
__free_pages(rx_buf->page, efx->rx_buffer_order);
}
}
-static inline void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *rx_buf)
+static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf)
{
efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
efx_free_rx_buffer(rx_queue->efx, rx_buf);
efx_schedule_slow_fill(rx_queue, 1);
}
-static inline void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *rx_buf,
- int len, bool *discard,
- bool *leak_packet)
+static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf,
+ int len, bool *discard,
+ bool *leak_packet)
{
struct efx_nic *efx = rx_queue->efx;
unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
* Handles driverlink veto, and passes the fragment up via
* the appropriate LRO method
*/
-static inline void efx_rx_packet_lro(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf)
+static void efx_rx_packet_lro(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf)
{
struct net_lro_mgr *lro_mgr = &channel->lro_mgr;
void *priv = channel;
}
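
A hedged sketch of the dispatch the comment describes, minus the driverlink veto: page-backed buffers go up to LRO as fragment descriptors, skb-backed buffers as whole skbs, using the stock lro_receive_frags()/lro_receive_skb() helpers. The fragment offset and sizes are simplified assumptions.

/* Illustrative sketch; the driverlink veto is omitted. */
static void example_rx_packet_lro(struct efx_channel *channel,
                                  struct efx_rx_buffer *rx_buf)
{
        struct net_lro_mgr *lro_mgr = &channel->lro_mgr;
        void *priv = channel;

        if (rx_buf->page) {
                /* Page-backed buffer: pass a fragment descriptor to LRO. */
                struct skb_frag_struct frags[1];

                frags[0].page = rx_buf->page;
                frags[0].page_offset = 0;               /* assumed offset */
                frags[0].size = rx_buf->len;
                lro_receive_frags(lro_mgr, frags, rx_buf->len,
                                  rx_buf->len, priv, 0);
        } else {
                /* skb-backed buffer: pass the whole skb to LRO. */
                lro_receive_skb(lro_mgr, rx_buf->skb, priv);
        }
}
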
/* Allocate and construct an SKB around a struct page. */
-static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
- struct efx_nic *efx,
- int hdr_len)
+static struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
+ struct efx_nic *efx,
+ int hdr_len)
{
struct sk_buff *skb;
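
Roughly, "an SKB around a struct page" means copying the first hdr_len bytes into a freshly allocated skb's linear area and attaching the page as a paged fragment for the remainder. A sketch under that assumption; the alignment and page-offset details are guesses.

/* Illustrative sketch; alignment and page-offset details are assumed. */
static struct sk_buff *example_rx_mk_skb(struct efx_rx_buffer *rx_buf,
                                         struct efx_nic *efx, int hdr_len)
{
        struct sk_buff *skb;

        skb = netdev_alloc_skb(efx->net_dev, hdr_len + NET_IP_ALIGN);
        if (unlikely(!skb))
                return NULL;
        skb_reserve(skb, NET_IP_ALIGN);

        /* Copy the packet headers into the skb's linear area... */
        memcpy(skb_put(skb, hdr_len), rx_buf->data, hdr_len);

        /* ...and attach the page holding the payload as fragment 0. */
        skb_fill_page_desc(skb, 0, rx_buf->page, hdr_len,
                           rx_buf->len - hdr_len);
        skb->len = rx_buf->len;
        skb->data_len = rx_buf->len - hdr_len;
        skb->truesize += rx_buf->len - hdr_len;
        return skb;
}
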
* We want to be able to nest calls to netif_stop_queue(), since each
* channel can have an individual stop on the queue.
*/
-inline void efx_wake_queue(struct efx_nic *efx)
+void efx_wake_queue(struct efx_nic *efx)
{
local_bh_disable();
if (atomic_dec_and_lock(&efx->netif_stop_count,
local_bh_enable();
}
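
For contrast, a sketch of the stop side of this nesting scheme: each stop bumps the counter, and the wake path above re-enables the queue only when the count drops back to zero (that is what atomic_dec_and_lock() is testing). The netif_stop_lock name is an assumption.

/* Illustrative sketch of the stop side; the lock name is an assumption. */
void example_stop_queue(struct efx_nic *efx)
{
        spin_lock_bh(&efx->netif_stop_lock);            /* assumed lock */
        /* The first stop disables the queue; further stops only nest. */
        if (atomic_inc_return(&efx->netif_stop_count) == 1)
                netif_stop_queue(efx->net_dev);
        spin_unlock_bh(&efx->netif_stop_lock);
}
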
-static inline void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
- struct efx_tx_buffer *buffer)
+static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
+ struct efx_tx_buffer *buffer)
{
if (buffer->unmap_len) {
struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
struct efx_tso_header *tsoh);
-static inline void efx_tsoh_free(struct efx_tx_queue *tx_queue,
- struct efx_tx_buffer *buffer)
+static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
+ struct efx_tx_buffer *buffer)
{
if (buffer->tsoh) {
if (likely(!buffer->tsoh->unmap_len)) {
* Returns NETDEV_TX_OK or NETDEV_TX_BUSY
* You must hold netif_tx_lock() to call this function.
*/
-static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
- const struct sk_buff *skb)
+static int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
+ const struct sk_buff *skb)
{
struct efx_nic *efx = tx_queue->efx;
struct pci_dev *pci_dev = efx->pci_dev;
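
A usage sketch showing the calling contract rather than the function body: in this era the networking core invokes a driver's hard_start_xmit handler with netif_tx_lock held (absent NETIF_F_LLTX), which satisfies the locking requirement above, and NETDEV_TX_BUSY asks the core to requeue the skb. The handler name and queue selection are assumptions.

/* Illustrative caller sketch; handler name and queue choice are assumed. */
static int example_hard_start_xmit(struct sk_buff *skb,
                                   struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_tx_queue *tx_queue = &efx->tx_queue[0];      /* assumed */

        /* The core already holds netif_tx_lock here, so the locking
         * requirement is met.  NETDEV_TX_BUSY makes the core requeue the
         * skb and retry later. */
        return efx_enqueue_skb(tx_queue, skb);
}
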
* This removes packets from the TX queue, up to and including the
* specified index.
*/
-static inline void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
- unsigned int index)
+static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
+ unsigned int index)
{
struct efx_nic *efx = tx_queue->efx;
unsigned int stop_index, read_ptr;
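
A sketch of the ring walk the comment describes: advance from the oldest outstanding entry up to and including 'index', releasing each buffer along the way. EXAMPLE_TXQ_MASK, the read_count/buffer fields and the per-buffer reset are assumptions about the ring layout.

/* Illustrative sketch; EXAMPLE_TXQ_MASK and the buffer reset are assumed. */
#define EXAMPLE_TXQ_MASK (1024 - 1)                     /* assumed ring size */

static void example_dequeue_buffers(struct efx_tx_queue *tx_queue,
                                    unsigned int index)
{
        unsigned int stop_index, read_ptr;

        /* Walk from the last completed entry up to and including 'index'. */
        stop_index = (index + 1) & EXAMPLE_TXQ_MASK;
        read_ptr = tx_queue->read_count & EXAMPLE_TXQ_MASK;

        while (read_ptr != stop_index) {
                struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

                efx_dequeue_buffer(tx_queue, buffer);
                buffer->len = 0;
                buffer->continuation = true;

                ++tx_queue->read_count;
                read_ptr = tx_queue->read_count & EXAMPLE_TXQ_MASK;
        }
}
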
* Verify that our various assumptions about sk_buffs and the conditions
* under which TSO will be attempted hold true.
*/
-static inline void efx_tso_check_safe(const struct sk_buff *skb)
+static void efx_tso_check_safe(const struct sk_buff *skb)
{
EFX_BUG_ON_PARANOID(skb->protocol != htons(ETH_P_IP));
EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
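
Beyond the protocol checks visible here, the conditions one would expect such a function to assert are that the packet really is TCP, has a non-zero segment size, and keeps its TCP header inside the linear area. A hedged sketch of those extra checks, not text from the patch:

/* Illustrative extra checks, not text from the patch. */
static void example_tso_extra_checks(const struct sk_buff *skb)
{
        /* TSO is only attempted for TCP, with a non-zero segment size
         * and the TCP header fully inside the linear area. */
        EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
        EFX_BUG_ON_PARANOID(skb_shinfo(skb)->gso_size == 0);
        EFX_BUG_ON_PARANOID(skb_headlen(skb) <
                            skb_transport_offset(skb) + tcp_hdrlen(skb));
}
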
* a single fragment, and we know it doesn't cross a page boundary. It
* also allows us to not worry about end-of-packet etc.
*/
-static inline void efx_tso_put_header(struct efx_tx_queue *tx_queue,
- struct efx_tso_header *tsoh, unsigned len)
+static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
+ struct efx_tso_header *tsoh, unsigned len)
{
struct efx_tx_buffer *buffer;
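
A sketch of what "one header, one descriptor" amounts to: fill a single efx_tx_buffer with the header's DMA address and length and advance the insert pointer. The ring indexing (reusing EXAMPLE_TXQ_MASK from the earlier sketch), the flag names and the tsoh fields are assumptions.

/* Illustrative sketch; ring indexing and field names are assumed. */
static void example_tso_put_header(struct efx_tx_queue *tx_queue,
                                   struct efx_tso_header *tsoh, unsigned len)
{
        struct efx_tx_buffer *buffer;

        /* One descriptor covers the whole header: it is contiguous and
         * never crosses a page boundary, so no splitting is needed. */
        buffer = &tx_queue->buffer[tx_queue->insert_count & EXAMPLE_TXQ_MASK];
        buffer->dma_addr = tsoh->dma_addr;      /* assumed tsoh field */
        buffer->len = len;
        buffer->continuation = true;            /* more descriptors follow */
        buffer->tsoh = tsoh;                    /* freed on TX completion */
        ++tx_queue->insert_count;
}
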
/* Parse the SKB header and initialise state. */
-static inline void tso_start(struct tso_state *st, const struct sk_buff *skb)
+static void tso_start(struct tso_state *st, const struct sk_buff *skb)
{
/* All ethernet/IP/TCP headers combined size is TCP header size
* plus offset of TCP header relative to start of packet.
st->unmap_single = false;
}
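
Concretely, the rule in the comment works out to header_len = skb_transport_offset(skb) + tcp_hdrlen(skb). A sketch of the state initialisation built on that; every tso_state field except header_len and unmap_single is an assumption.

/* Illustrative sketch; fields other than header_len/unmap_single assumed. */
static void example_tso_start(struct tso_state *st, const struct sk_buff *skb)
{
        /* Combined ethernet/IP/TCP header length = offset of the TCP
         * header within the packet + the TCP header's own length. */
        st->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

        st->seqnum = ntohl(tcp_hdr(skb)->seq);          /* assumed field */
        st->ipv4_id = ntohs(ip_hdr(skb)->id);           /* assumed field */
        st->packet_space = skb_shinfo(skb)->gso_size;   /* assumed field */
        st->unmap_single = false;
}
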
-static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
- skb_frag_t *frag)
+static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
+ skb_frag_t *frag)
{
st->unmap_addr = pci_map_page(efx->pci_dev, frag->page,
frag->page_offset, frag->size,
return -ENOMEM;
}
-static inline int
-tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
- const struct sk_buff *skb)
+static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
+ const struct sk_buff *skb)
{
int hl = st->header_len;
int len = skb_headlen(skb) - hl;
* of fragment or end-of-packet. Return 0 on success, 1 if not enough
* space in @tx_queue.
*/
-static inline int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
- const struct sk_buff *skb,
- struct tso_state *st)
+static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
+ const struct sk_buff *skb,
+ struct tso_state *st)
{
struct efx_tx_buffer *buffer;
int n, end_of_packet, rc;
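
The core of the fill step is a min(): take whichever runs out first, the current DMA fragment or the space left in this TSO segment, and emit one descriptor for it. In this sketch example_tx_queue_insert() is a hypothetical stand-in for the descriptor push (returning non-zero when the ring is full), and the in_len/out_len/packet_space/dma_addr fields are assumptions.

/* Illustrative core of the fill step; example_tx_queue_insert() is a
 * hypothetical stand-in for the descriptor push. */
static int example_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
                                             const struct sk_buff *skb,
                                             struct tso_state *st)
{
        struct efx_tx_buffer *buffer;
        int n, end_of_packet, rc;

        if (st->in_len == 0 || st->packet_space == 0)
                return 0;       /* nothing left in fragment or segment */

        /* Emit one descriptor for whichever runs out first: the current
         * DMA fragment or the space left in this TSO segment. */
        n = min(st->in_len, st->packet_space);
        st->packet_space -= n;
        st->out_len -= n;
        st->in_len -= n;

        rc = example_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
        if (rc)
                return 1;       /* ring full: caller must stop and retry */

        end_of_packet = (st->out_len == 0 || st->packet_space == 0);
        buffer->continuation = !end_of_packet;
        st->dma_addr += n;
        return 0;
}
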
* Generate a new header and prepare for the new packet. Return 0 on
* success, or -1 if failed to alloc header.
*/
-static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
- const struct sk_buff *skb,
- struct tso_state *st)
+static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
+ const struct sk_buff *skb,
+ struct tso_state *st)
{
struct efx_tso_header *tsoh;
struct iphdr *tsoh_iph;
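
A sketch of the per-segment header generation the comment describes: copy the original headers, then patch the TCP sequence number, IP id and IP total length for this segment, and push the result with the efx_tso_put_header() shown earlier in this patch. efx_tsoh_heap_alloc() is assumed as the counterpart of the efx_tsoh_heap_free() declared above, TSOH_BUFFER() is an assumed accessor, and VLAN tags, IP options and the short final segment are ignored.

/* Illustrative sketch; efx_tsoh_heap_alloc() and TSOH_BUFFER() are assumed. */
static int example_tso_start_new_packet(struct efx_tx_queue *tx_queue,
                                        const struct sk_buff *skb,
                                        struct tso_state *st)
{
        struct efx_tso_header *tsoh;
        struct iphdr *tsoh_iph;
        struct tcphdr *tsoh_th;
        u8 *header;

        tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
        if (!tsoh)
                return -1;

        /* Start from a copy of the original headers... */
        header = TSOH_BUFFER(tsoh);
        memcpy(header, skb->data, st->header_len);

        /* ...then patch the per-segment fields. */
        tsoh_th = (struct tcphdr *)(header + skb_transport_offset(skb));
        tsoh_iph = (struct iphdr *)(header + skb_network_offset(skb));
        tsoh_th->seq = htonl(st->seqnum);
        st->seqnum += skb_shinfo(skb)->gso_size;
        tsoh_iph->id = htons(st->ipv4_id++);
        tsoh_iph->tot_len = htons(st->header_len - ETH_HLEN +
                                  skb_shinfo(skb)->gso_size);

        efx_tso_put_header(tx_queue, tsoh, st->header_len);
        return 0;
}
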