/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
#include <linux/socket.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"
/* Number of RX descriptors pushed at once. */
#define EFX_RX_BATCH  8

/* Maximum size of a buffer sharing a page */
#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS  64u
/*
 * rx_alloc_method - RX buffer allocation method
 *
 * This driver supports two methods for allocating and using RX buffers:
 * each RX buffer may be backed by an skb or by an order-n page.
 *
 * When GRO is in use then the second method has a lower overhead,
 * since we don't have to allocate then free skbs on reassembled frames.
 *
 * Values:
 *   - RX_ALLOC_METHOD_AUTO = 0
 *   - RX_ALLOC_METHOD_SKB  = 1
 *   - RX_ALLOC_METHOD_PAGE = 2
 *
 * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
 * controlled by the parameters below.
 *
 *   - Since pushing and popping descriptors are separated by the rx_queue
 *     size, the watermarks should be ~rxd_size.
 *   - The performance win from using page-based allocation for GRO is less
 *     than the performance hit of using page-based allocation for non-GRO,
 *     so the watermarks should reflect this.
 *
 * Per channel we maintain a single variable, updated by each channel:
 *
 *   rx_alloc_level += (gro_performed ? RX_ALLOC_FACTOR_GRO :
 *                      RX_ALLOC_FACTOR_SKB)
 *
 * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
 * limits the hysteresis), and update the allocation strategy:
 *
 *   rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_GRO ?
 *                      RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
 */
static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;
#define RX_ALLOC_LEVEL_GRO	0x2000
#define RX_ALLOC_LEVEL_MAX	0x3000
#define RX_ALLOC_FACTOR_GRO	1
#define RX_ALLOC_FACTOR_SKB	(-2)
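
/* Illustrative sketch (not part of the original driver): how the AUTO
 * heuristic evolves with the factors above.  Each GRO-merged packet nudges
 * the level up by 1, while each packet delivered normally pulls it down by
 * 2, so sustained GRO success is required before the level climbs past
 * RX_ALLOC_LEVEL_GRO and page-based allocation is chosen.
 */
#if 0	/* example only, never compiled */
static void example_rx_alloc_hysteresis(struct efx_channel *channel,
					bool gro_performed)
{
	channel->rx_alloc_level += (gro_performed ? RX_ALLOC_FACTOR_GRO :
				    RX_ALLOC_FACTOR_SKB);
	/* efx_rx_strategy() later clamps the level to 0..RX_ALLOC_LEVEL_MAX
	 * and selects RX_ALLOC_METHOD_PAGE only above RX_ALLOC_LEVEL_GRO.
	 */
}
#endif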
/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;
/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow and at least 2 to allow
 * pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM 2
/* Offset of ethernet header within page */
static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
					     struct efx_rx_buffer *buf)
{
	/* Offset is always within one page, so we don't need to consider
	 * the page order.
	 */
	return ((unsigned int) buf->dma_addr & (PAGE_SIZE - 1)) +
		efx->type->rx_buffer_hash_size;
}
static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
{
	return PAGE_SIZE << efx->rx_buffer_order;
}
static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
{
	if (buf->flags & EFX_RX_BUF_PAGE)
		return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
	else
		return (u8 *)buf->u.skb->data + efx->type->rx_buffer_hash_size;
}
static inline u32 efx_rx_buf_hash(const u8 *eh)
{
	/* The ethernet header is always directly after any hash. */
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
	return __le32_to_cpup((const __le32 *)(eh - 4));
#else
	const u8 *data = eh - 4;
	return (u32)data[0]	  |
	       (u32)data[1] << 8  |
	       (u32)data[2] << 16 |
	       (u32)data[3] << 24;
#endif
}
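
/* Buffer layout around the hash (an illustration, assuming the NIC prepends
 * a prefix of efx->type->rx_buffer_hash_size bytes with the flow hash in its
 * last 4 bytes, which is what the eh - 4 read above relies on):
 *
 *	start of buffer                        eh = efx_rx_buf_eh(efx, buf)
 *	|<----- rx_buffer_hash_size bytes ----->|<-- Ethernet header -->|
 *	          (flow hash = LE32 in the final 4 bytes)
 */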
/**
 * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
 * @rx_queue:		Efx RX queue
 *
 * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
 * struct efx_rx_buffer for each one. Return a negative error code or 0
 * on success. May fail having only inserted fewer than EFX_RX_BATCH
 * buffers.
 */
static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct net_device *net_dev = efx->net_dev;
	struct efx_rx_buffer *rx_buf;
	struct sk_buff *skb;
	int skb_len = efx->rx_buffer_len;
	unsigned index, count;

	for (count = 0; count < EFX_RX_BATCH; ++count) {
		index = rx_queue->added_count & rx_queue->ptr_mask;
		rx_buf = efx_rx_buffer(rx_queue, index);

		rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len);
		if (unlikely(!skb))
			return -ENOMEM;

		/* Adjust the SKB for padding */
		skb_reserve(skb, NET_IP_ALIGN);
		rx_buf->len = skb_len - NET_IP_ALIGN;
		rx_buf->flags = 0;

		rx_buf->dma_addr = pci_map_single(efx->pci_dev,
						  skb->data, rx_buf->len,
						  PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(efx->pci_dev,
						   rx_buf->dma_addr))) {
			dev_kfree_skb_any(skb);
			rx_buf->u.skb = NULL;
			return -EIO;
		}

		++rx_queue->added_count;
		++rx_queue->alloc_skb_count;
	}

	return 0;
}
/**
 * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
 * @rx_queue:		Efx RX queue
 *
 * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
 * and populates struct efx_rx_buffers for each one. Return a negative error
 * code or 0 on success. If a single page can be split between two buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	struct page *page;
	void *page_addr;
	struct efx_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	/* We can split a page between two buffers */
	BUILD_BUG_ON(EFX_RX_BATCH & 1);

	for (count = 0; count < EFX_RX_BATCH; ++count) {
		page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
				   efx->rx_buffer_order);
		if (unlikely(page == NULL))
			return -ENOMEM;
		dma_addr = pci_map_page(efx->pci_dev, page, 0,
					efx_rx_buf_size(efx),
					PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
			__free_pages(page, efx->rx_buffer_order);
			return -EIO;
		}
		page_addr = page_address(page);
		state = page_addr;
		state->refcnt = 0;
		state->dma_addr = dma_addr;

		page_addr += sizeof(struct efx_rx_page_state);
		dma_addr += sizeof(struct efx_rx_page_state);

	split:
		index = rx_queue->added_count & rx_queue->ptr_mask;
		rx_buf = efx_rx_buffer(rx_queue, index);
		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
		rx_buf->u.page = page;
		rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
		rx_buf->flags = EFX_RX_BUF_PAGE;
		++rx_queue->added_count;
		++rx_queue->alloc_page_count;
		++state->refcnt;

		if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
			/* Use the second half of the page */
			get_page(page);
			dma_addr += (PAGE_SIZE >> 1);
			page_addr += (PAGE_SIZE >> 1);
			++count;
			goto split;
		}
	}

	return 0;
}
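
/* Page layout sketch (illustrative, for the split case where
 * efx->rx_buffer_len <= EFX_RX_HALF_PAGE):
 *
 *	+---------------------+------------------+------------------+
 *	| efx_rx_page_state   | buffer 0         | buffer 1         |
 *	| (refcnt, dma_addr)  | (first half)     | (second half)    |
 *	+---------------------+------------------+------------------+
 *	0                                        PAGE_SIZE/2        PAGE_SIZE
 *
 * state->refcnt counts the buffers still mapped from the page, so
 * efx_unmap_rx_buffer() below only tears down the PCI mapping once the
 * last buffer sharing the page has been unmapped.
 */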
static void efx_unmap_rx_buffer(struct efx_nic *efx,
				struct efx_rx_buffer *rx_buf)
{
	if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
		struct efx_rx_page_state *state;

		state = page_address(rx_buf->u.page);
		if (--state->refcnt == 0) {
			pci_unmap_page(efx->pci_dev,
				       state->dma_addr,
				       efx_rx_buf_size(efx),
				       PCI_DMA_FROMDEVICE);
		}
	} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
		pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
				 rx_buf->len, PCI_DMA_FROMDEVICE);
	}
}
static void efx_free_rx_buffer(struct efx_nic *efx,
			       struct efx_rx_buffer *rx_buf)
{
	if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
		__free_pages(rx_buf->u.page, efx->rx_buffer_order);
		rx_buf->u.page = NULL;
	} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
		dev_kfree_skb_any(rx_buf->u.skb);
		rx_buf->u.skb = NULL;
	}
}
static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
	efx_free_rx_buffer(rx_queue->efx, rx_buf);
}
/* Attempt to resurrect the other receive buffer that used to share this page,
 * which had previously been passed up to the kernel and freed. */
static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
				    struct efx_rx_buffer *rx_buf)
{
	struct efx_rx_page_state *state = page_address(rx_buf->u.page);
	struct efx_rx_buffer *new_buf;
	unsigned fill_level, index;

	/* +1 because efx_rx_packet() incremented removed_count. +1 because
	 * we'd like to insert an additional descriptor whilst leaving
	 * EFX_RXD_HEAD_ROOM for the non-recycle path */
	fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
	if (unlikely(fill_level > rx_queue->max_fill)) {
		/* We could place "state" on a list, and drain the list in
		 * efx_fast_push_rx_descriptors(). For now, this will do. */
		return;
	}

	++state->refcnt;
	get_page(rx_buf->u.page);

	index = rx_queue->added_count & rx_queue->ptr_mask;
	new_buf = efx_rx_buffer(rx_queue, index);
	new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
	new_buf->u.page = rx_buf->u.page;
	new_buf->len = rx_buf->len;
	new_buf->flags = EFX_RX_BUF_PAGE;
	++rx_queue->added_count;
}
/* Recycle the given rx buffer directly back into the rx_queue. There is
 * always room to add this buffer, because we've just popped a buffer. */
static void efx_recycle_rx_buffer(struct efx_channel *channel,
				  struct efx_rx_buffer *rx_buf)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_rx_buffer *new_buf;
	unsigned index;

	rx_buf->flags &= EFX_RX_BUF_PAGE;

	if ((rx_buf->flags & EFX_RX_BUF_PAGE) &&
	    efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
	    page_count(rx_buf->u.page) == 1)
		efx_resurrect_rx_buffer(rx_queue, rx_buf);

	index = rx_queue->added_count & rx_queue->ptr_mask;
	new_buf = efx_rx_buffer(rx_queue, index);

	memcpy(new_buf, rx_buf, sizeof(*new_buf));
	rx_buf->u.page = NULL;
	++rx_queue->added_count;
}
/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	unsigned fill_level;
	int space, rc = 0;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	space = rx_queue->max_fill - fill_level;
	EFX_BUG_ON_PARANOID(space < EFX_RX_BATCH);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d using %s allocation\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill,
		   channel->rx_alloc_push_pages ? "page" : "skb");

	do {
		if (channel->rx_alloc_push_pages)
			rc = efx_init_rx_buffers_page(rx_queue);
		else
			rc = efx_init_rx_buffers_skb(rx_queue);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			if (rx_queue->added_count == rx_queue->removed_count)
				efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

 out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}
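
/* Note (illustrative): added_count and removed_count are free-running
 * unsigned counters, so "added_count - removed_count" yields the fill
 * level even after the counters wrap, e.g.:
 *
 *	added_count = 0x00000005, removed_count = 0xfffffffd
 *	=> fill_level = 0x00000005 - 0xfffffffd = 8
 */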
void efx_rx_slow_fill(unsigned long context)
{
	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}
static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len, bool *leak_packet)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was.
	 */
	rx_buf->flags |= EFX_RX_PKT_DISCARD;

	if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d seriously overlength "
				  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
				  efx_rx_queue_index(rx_queue), len, max_len,
				  efx->type->rx_buffer_padding);
		/* If this buffer was skb-allocated, then the meta
		 * data at the end of the skb will be trashed. So
		 * we have no choice but to leak the fragment.
		 */
		*leak_packet = !(rx_buf->flags & EFX_RX_BUF_PAGE);
		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
	} else {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d overlength RX event "
				  "(0x%x > 0x%x)\n",
				  efx_rx_queue_index(rx_queue), len, max_len);
	}

	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}
/* Pass a received packet up through GRO. GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
static void efx_rx_packet_gro(struct efx_channel *channel,
			      struct efx_rx_buffer *rx_buf,
			      const u8 *eh)
{
	struct napi_struct *napi = &channel->napi_str;
	gro_result_t gro_result;

	if (rx_buf->flags & EFX_RX_BUF_PAGE) {
		struct efx_nic *efx = channel->efx;
		struct page *page = rx_buf->u.page;
		struct sk_buff *skb;

		rx_buf->u.page = NULL;

		skb = napi_get_frags(napi);
		if (!skb) {
			put_page(page);
			return;
		}

		if (efx->net_dev->features & NETIF_F_RXHASH)
			skb->rxhash = efx_rx_buf_hash(eh);

		skb_fill_page_desc(skb, 0, page,
				   efx_rx_buf_offset(efx, rx_buf), rx_buf->len);

		skb->len = rx_buf->len;
		skb->data_len = rx_buf->len;
		skb->truesize += rx_buf->len;
		skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
				  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

		skb_record_rx_queue(skb, channel->channel);

		gro_result = napi_gro_frags(napi);
	} else {
		struct sk_buff *skb = rx_buf->u.skb;

		EFX_BUG_ON_PARANOID(!(rx_buf->flags & EFX_RX_PKT_CSUMMED));
		rx_buf->u.skb = NULL;
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		gro_result = napi_gro_receive(napi, skb);
	}

	if (gro_result == GRO_NORMAL) {
		channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
	} else if (gro_result != GRO_DROP) {
		channel->rx_alloc_level += RX_ALLOC_FACTOR_GRO;
		channel->irq_mod_score += 2;
	}
}
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int len, u16 flags)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_rx_buffer *rx_buf;
	bool leak_packet = false;

	rx_buf = efx_rx_buffer(rx_queue, index);
	rx_buf->flags |= flags;

	/* This allows the refill path to post another buffer.
	 * EFX_RXD_HEAD_ROOM ensures that the slot we are using
	 * isn't overwritten yet.
	 */
	rx_queue->removed_count++;

	/* Validate the length encoded in the event vs the descriptor pushed */
	efx_rx_packet__check_len(rx_queue, rx_buf, len, &leak_packet);

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received id %x at %llx+%x %s%s\n",
		   efx_rx_queue_index(rx_queue), index,
		   (unsigned long long)rx_buf->dma_addr, len,
		   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
		   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

	/* Discard packet, if instructed to do so */
	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
		if (unlikely(leak_packet))
			channel->n_skbuff_leaks++;
		else
			efx_recycle_rx_buffer(channel, rx_buf);

		/* Don't hold off the previous receive */
		rx_buf = NULL;
		goto out;
	}

	/* Release card resources - assumes all RX buffers consumed in-order
	 * per RX queue
	 */
	efx_unmap_rx_buffer(efx, rx_buf);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(efx_rx_buf_eh(efx, rx_buf));

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	rx_buf->len = len - efx->type->rx_buffer_hash_size;
out:
	if (channel->rx_pkt)
		__efx_rx_packet(channel, channel->rx_pkt);
	channel->rx_pkt = rx_buf;
}
static void efx_rx_deliver(struct efx_channel *channel,
			   struct efx_rx_buffer *rx_buf)
{
	struct sk_buff *skb;

	/* We now own the SKB */
	skb = rx_buf->u.skb;
	rx_buf->u.skb = NULL;

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);

	/* Pass the packet up */
	netif_receive_skb(skb);

	/* Update allocation strategy method */
	channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
}
/* Handle a received packet. Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf)
{
	struct efx_nic *efx = channel->efx;
	u8 *eh = efx_rx_buf_eh(efx, rx_buf);

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		efx_loopback_rx_packet(efx, eh, rx_buf->len);
		efx_free_rx_buffer(efx, rx_buf);
		return;
	}

	if (!(rx_buf->flags & EFX_RX_BUF_PAGE)) {
		struct sk_buff *skb = rx_buf->u.skb;

		prefetch(skb_shinfo(skb));

		skb_reserve(skb, efx->type->rx_buffer_hash_size);
		skb_put(skb, rx_buf->len);

		if (efx->net_dev->features & NETIF_F_RXHASH)
			skb->rxhash = efx_rx_buf_hash(eh);

		/* Move past the ethernet header; skb->data still points
		 * at the ethernet header before this call */
		skb->protocol = eth_type_trans(skb, efx->net_dev);

		skb_record_rx_queue(skb, channel->channel);
	}

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

	if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)))
		efx_rx_packet_gro(channel, rx_buf, eh);
	else
		efx_rx_deliver(channel, rx_buf);
}
void efx_rx_strategy(struct efx_channel *channel)
{
	enum efx_rx_alloc_method method = rx_alloc_method;

	/* Only makes sense to use page based allocation if GRO is enabled */
	if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
		method = RX_ALLOC_METHOD_SKB;
	} else if (method == RX_ALLOC_METHOD_AUTO) {
		/* Constrain the rx_alloc_level */
		if (channel->rx_alloc_level < 0)
			channel->rx_alloc_level = 0;
		else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
			channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;

		/* Decide on the allocation method */
		method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_GRO) ?
			  RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
	}

	/* Push the option */
	channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
}
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}
	return rc;
}
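
/* Worked example (illustrative): with efx->rxq_entries == 1000 the ring is
 * rounded up to the next power of two, so entries == 1024 and ptr_mask ==
 * 0x3ff.  Masking the free-running counters with ptr_mask then indexes the
 * ring cheaply, e.g. added_count == 1030 maps to slot 1030 & 0x3ff == 6.
 */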
void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int max_fill, trigger, max_trigger;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	max_trigger = max_fill - EFX_RX_BATCH;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;

	/* Set up RX descriptor ring */
	rx_queue->enabled = true;
	efx_nic_init_rx(rx_queue);
}
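
/* Worked example (illustrative): with rxq_entries == 512 the limits are
 * max_fill == 512 - EFX_RXD_HEAD_ROOM == 510 and max_trigger ==
 * 510 - EFX_RX_BATCH == 502.  Loading the module with
 * rx_refill_threshold == 90 would give trigger == 510 * 90 / 100 == 459,
 * i.e. refilling starts once the ring drops below 90% full.
 */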
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	int i;
	struct efx_rx_buffer *rx_buf;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* A flush failure might have left rx_queue->enabled */
	rx_queue->enabled = false;

	del_timer_sync(&rx_queue->slow_fill);
	efx_nic_fini_rx(rx_queue);

	/* Release RX buffers. NB: start at index 0, not the current HW ptr */
	if (rx_queue->buffer) {
		for (i = 0; i <= rx_queue->ptr_mask; i++) {
			rx_buf = efx_rx_buffer(rx_queue, i);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}
}
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}
module_param(rx_alloc_method, int, 0644);
MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");