/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Number of RX descriptors pushed at once. */
#define EFX_RX_BATCH  8

/* Maximum size of a buffer sharing a page */
#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS  64u

/*
 * rx_alloc_method - RX buffer allocation method
 *
 * This driver supports two methods for allocating and using RX buffers:
 * each RX buffer may be backed by an skb or by an order-n page.
 *
 * When GRO is in use, the second method has a lower overhead, since we
 * don't have to allocate then free skbs on reassembled frames.
 *
 * Values:
 *   - RX_ALLOC_METHOD_AUTO = 0
 *   - RX_ALLOC_METHOD_SKB  = 1
 *   - RX_ALLOC_METHOD_PAGE = 2
 *
 * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
 * controlled by the parameters below.
 *
 *   - Since pushing and popping descriptors are separated by the rx_queue
 *     size, the watermarks should be ~rxd_size.
 *   - The performance win from using page-based allocation for GRO is less
 *     than the performance hit of using page-based allocation for non-GRO,
 *     so the watermarks should reflect this.
 *
 * Per channel we maintain a single variable, updated by each channel:
 *
 *   rx_alloc_level += (gro_performed ? RX_ALLOC_FACTOR_GRO :
 *                      RX_ALLOC_FACTOR_SKB)
 * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
 * limits the hysteresis), and update the allocation strategy:
 *
 *   rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_GRO ?
 *                      RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
 */
static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;

#define RX_ALLOC_LEVEL_GRO 0x2000
#define RX_ALLOC_LEVEL_MAX 0x3000
#define RX_ALLOC_FACTOR_GRO 1
#define RX_ALLOC_FACTOR_SKB (-2)
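
/* For example, starting from a level of 0 a channel must see roughly
 * 0x2000 net GRO-merged packets before page-based allocation is selected,
 * while each packet delivered as an ordinary skb pulls the level back
 * down twice as fast.
 */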

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow and at least 2 to allow
 * pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM 2

/* Offset of ethernet header within page */
static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
					     struct efx_rx_buffer *buf)
{
	/* Offset is always within one page, so we don't need to consider
	 * the page order.
	 */
	return ((unsigned int) buf->dma_addr & (PAGE_SIZE - 1)) +
		efx->type->rx_buffer_hash_size;
}
static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
{
	return PAGE_SIZE << efx->rx_buffer_order;
}

static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
{
	if (buf->flags & EFX_RX_BUF_PAGE)
		return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
	else
		return (u8 *)buf->u.skb->data + efx->type->rx_buffer_hash_size;
}

static inline u32 efx_rx_buf_hash(const u8 *eh)
{
	/* The ethernet header is always directly after any hash. */
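	/* Read the hash as one 32-bit load only when that is either cheap
	 * (efficient unaligned access) or guaranteed aligned (NET_IP_ALIGN
	 * a multiple of four); otherwise assemble it byte by byte.
	 */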
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
	return __le32_to_cpup((const __le32 *)(eh - 4));
#else
	const u8 *data = eh - 4;
	return (u32)data[0]	  |
	       (u32)data[1] << 8  |
	       (u32)data[2] << 16 |
	       (u32)data[3] << 24;
#endif
}

/**
 * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
 * struct efx_rx_buffer for each one. Return a negative error code or 0
 * on success. May fail having only inserted fewer than EFX_RX_BATCH
 * buffers.
 */
static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct net_device *net_dev = efx->net_dev;
	struct efx_rx_buffer *rx_buf;
	struct sk_buff *skb;
	int skb_len = efx->rx_buffer_len;
	unsigned index, count;

	for (count = 0; count < EFX_RX_BATCH; ++count) {
		index = rx_queue->added_count & rx_queue->ptr_mask;
		rx_buf = efx_rx_buffer(rx_queue, index);

		rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len);
		if (unlikely(!skb))
			return -ENOMEM;

		/* Adjust the SKB for padding */
		skb_reserve(skb, NET_IP_ALIGN);
		rx_buf->len = skb_len - NET_IP_ALIGN;
		rx_buf->flags = 0;

		rx_buf->dma_addr = pci_map_single(efx->pci_dev,
						  skb->data, rx_buf->len,
						  PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(efx->pci_dev,
						   rx_buf->dma_addr))) {
			dev_kfree_skb_any(skb);
			rx_buf->u.skb = NULL;
			return -EIO;
		}

		++rx_queue->added_count;
		++rx_queue->alloc_skb_count;
	}

	return 0;
}

/**
 * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
 * and populates a struct efx_rx_buffer for each one. Return a negative error
 * code or 0 on success. If a single page can be split between two buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	struct page *page;
	void *page_addr;
	struct efx_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	/* We can split a page between two buffers */
	BUILD_BUG_ON(EFX_RX_BATCH & 1);

	for (count = 0; count < EFX_RX_BATCH; ++count) {
		page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
				   efx->rx_buffer_order);
		if (unlikely(page == NULL))
			return -ENOMEM;
		dma_addr = pci_map_page(efx->pci_dev, page, 0,
					efx_rx_buf_size(efx),
					PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
			__free_pages(page, efx->rx_buffer_order);
			return -EIO;
		}
		page_addr = page_address(page);
		state = page_addr;
		state->refcnt = 0;
		state->dma_addr = dma_addr;

		page_addr += sizeof(struct efx_rx_page_state);
		dma_addr += sizeof(struct efx_rx_page_state);

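		/* Reached twice per page when the buffer fits in half a
		 * page: once by falling through for the first half, and
		 * once via the goto below for the second half.
		 */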
	split:
		index = rx_queue->added_count & rx_queue->ptr_mask;
		rx_buf = efx_rx_buffer(rx_queue, index);
		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
		rx_buf->u.page = page;
		rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
		rx_buf->flags = EFX_RX_BUF_PAGE;
		++rx_queue->added_count;
		++rx_queue->alloc_page_count;
		++state->refcnt;

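		/* (~count & 1) is true when count is even, i.e. this is the
		 * first buffer carved from this page.
		 */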
		if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
			/* Use the second half of the page */
			get_page(page);
			dma_addr += (PAGE_SIZE >> 1);
			page_addr += (PAGE_SIZE >> 1);
			++count;
			goto split;
		}
	}

	return 0;
}

static void efx_unmap_rx_buffer(struct efx_nic *efx,
				struct efx_rx_buffer *rx_buf)
{
	if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
		struct efx_rx_page_state *state;

		state = page_address(rx_buf->u.page);
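		/* The DMA mapping covers the whole page, so only tear it
		 * down when the last buffer sharing the page is released.
		 */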
		if (--state->refcnt == 0) {
			pci_unmap_page(efx->pci_dev,
				       state->dma_addr,
				       efx_rx_buf_size(efx),
				       PCI_DMA_FROMDEVICE);
		}
	} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
		pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
				 rx_buf->len, PCI_DMA_FROMDEVICE);
	}
}

static void efx_free_rx_buffer(struct efx_nic *efx,
			       struct efx_rx_buffer *rx_buf)
{
	if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
		__free_pages(rx_buf->u.page, efx->rx_buffer_order);
		rx_buf->u.page = NULL;
	} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
		dev_kfree_skb_any(rx_buf->u.skb);
		rx_buf->u.skb = NULL;
	}
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
	efx_free_rx_buffer(rx_queue->efx, rx_buf);
}

/* Attempt to resurrect the other receive buffer that used to share this page,
 * which had previously been passed up to the kernel and freed. */
static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
				    struct efx_rx_buffer *rx_buf)
{
	struct efx_rx_page_state *state = page_address(rx_buf->u.page);
	struct efx_rx_buffer *new_buf;
	unsigned fill_level, index;

	/* +1 because efx_rx_packet() incremented removed_count. +1 because
	 * we'd like to insert an additional descriptor whilst leaving
	 * EFX_RXD_HEAD_ROOM for the non-recycle path */
	fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
	if (unlikely(fill_level > rx_queue->max_fill)) {
		/* We could place "state" on a list, and drain the list in
		 * efx_fast_push_rx_descriptors(). For now, this will do. */
		return;
	}

	++state->refcnt;
	get_page(rx_buf->u.page);

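	/* The resurrected buffer lives in the other half of the page, so
	 * flipping the PAGE_SIZE/2 bit of the DMA address points at it.
	 */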
	index = rx_queue->added_count & rx_queue->ptr_mask;
	new_buf = efx_rx_buffer(rx_queue, index);
	new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
	new_buf->u.page = rx_buf->u.page;
	new_buf->len = rx_buf->len;
	new_buf->flags = EFX_RX_BUF_PAGE;
	++rx_queue->added_count;
}

/* Recycle the given rx buffer directly back into the rx_queue. There is
 * always room to add this buffer, because we've just popped a buffer. */
static void efx_recycle_rx_buffer(struct efx_channel *channel,
				  struct efx_rx_buffer *rx_buf)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_rx_buffer *new_buf;
	unsigned index;

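	/* Keep only the PAGE flag; checksum and discard state from the
	 * completed receive must not leak into the recycled buffer.
	 */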
	rx_buf->flags &= EFX_RX_BUF_PAGE;

	if ((rx_buf->flags & EFX_RX_BUF_PAGE) &&
	    efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
	    page_count(rx_buf->u.page) == 1)
		efx_resurrect_rx_buffer(rx_queue, rx_buf);

	index = rx_queue->added_count & rx_queue->ptr_mask;
	new_buf = efx_rx_buffer(rx_queue, index);

	memcpy(new_buf, rx_buf, sizeof(*new_buf));
	rx_buf->u.page = NULL;
	++rx_queue->added_count;
}

/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	unsigned fill_level;
	int space, rc = 0;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	space = rx_queue->max_fill - fill_level;
	EFX_BUG_ON_PARANOID(space < EFX_RX_BATCH);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d using %s allocation\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill,
		   channel->rx_alloc_push_pages ? "page" : "skb");

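	/* Refill in batches of EFX_RX_BATCH buffers, stopping once less
	 * than a full batch of space remains.
	 */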
	do {
		if (channel->rx_alloc_push_pages)
			rc = efx_init_rx_buffers_page(rx_queue);
		else
			rc = efx_init_rx_buffers_skb(rx_queue);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			if (rx_queue->added_count == rx_queue->removed_count)
				efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

 out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}

void efx_rx_slow_fill(unsigned long context)
{
	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}

static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len, bool *leak_packet)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	rx_buf->flags |= EFX_RX_PKT_DISCARD;

	if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d seriously overlength "
				  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
				  efx_rx_queue_index(rx_queue), len, max_len,
				  efx->type->rx_buffer_padding);
		/* If this buffer was skb-allocated, then the meta
		 * data at the end of the skb will be trashed. So
		 * we have no choice but to leak the fragment.
		 */
		*leak_packet = !(rx_buf->flags & EFX_RX_BUF_PAGE);
		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
	} else {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d overlength RX event "
				  "(0x%x > 0x%x)\n",
				  efx_rx_queue_index(rx_queue), len, max_len);
	}

	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Pass a received packet up through GRO. GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
static void efx_rx_packet_gro(struct efx_channel *channel,
			      struct efx_rx_buffer *rx_buf,
			      const u8 *eh)
{
	struct napi_struct *napi = &channel->napi_str;
	gro_result_t gro_result;

	if (rx_buf->flags & EFX_RX_BUF_PAGE) {
		struct efx_nic *efx = channel->efx;
		struct page *page = rx_buf->u.page;
		struct sk_buff *skb;

		rx_buf->u.page = NULL;

		skb = napi_get_frags(napi);
		if (!skb) {
			put_page(page);
			return;
		}

		if (efx->net_dev->features & NETIF_F_RXHASH)
			skb->rxhash = efx_rx_buf_hash(eh);

		skb_fill_page_desc(skb, 0, page,
				   efx_rx_buf_offset(efx, rx_buf), rx_buf->len);

		skb->len = rx_buf->len;
		skb->data_len = rx_buf->len;
		skb->truesize += rx_buf->len;
		skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
				  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

		skb_record_rx_queue(skb, channel->channel);

		gro_result = napi_gro_frags(napi);
	} else {
		struct sk_buff *skb = rx_buf->u.skb;

		EFX_BUG_ON_PARANOID(!(rx_buf->flags & EFX_RX_PKT_CSUMMED));
		rx_buf->u.skb = NULL;
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		gro_result = napi_gro_receive(napi, skb);
	}

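	/* GRO_NORMAL means the packet was handed up as an ordinary skb, so
	 * count it towards skb allocation; any other successful result
	 * counts towards page/GRO allocation and the IRQ moderation score.
	 */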
	if (gro_result == GRO_NORMAL) {
		channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
	} else if (gro_result != GRO_DROP) {
		channel->rx_alloc_level += RX_ALLOC_FACTOR_GRO;
		channel->irq_mod_score += 2;
	}
}

void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int len, u16 flags)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_rx_buffer *rx_buf;
	bool leak_packet = false;

	rx_buf = efx_rx_buffer(rx_queue, index);
	rx_buf->flags |= flags;

	/* This allows the refill path to post another buffer.
	 * EFX_RXD_HEAD_ROOM ensures that the slot we are using
	 * isn't overwritten yet.
	 */
	rx_queue->removed_count++;

	/* Validate the length encoded in the event vs the descriptor pushed */
	efx_rx_packet__check_len(rx_queue, rx_buf, len, &leak_packet);

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received id %x at %llx+%x %s%s\n",
		   efx_rx_queue_index(rx_queue), index,
		   (unsigned long long)rx_buf->dma_addr, len,
		   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
		   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

	/* Discard packet, if instructed to do so */
	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
		if (unlikely(leak_packet))
			channel->n_skbuff_leaks++;
		else
			efx_recycle_rx_buffer(channel, rx_buf);

		/* Don't hold off the previous receive */
		rx_buf = NULL;
		goto out;
	}

	/* Release card resources - assumes all RX buffers consumed in-order
	 * per RX queue
	 */
	efx_unmap_rx_buffer(efx, rx_buf);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(efx_rx_buf_eh(efx, rx_buf));

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	rx_buf->len = len - efx->type->rx_buffer_hash_size;
out:
	if (channel->rx_pkt)
		__efx_rx_packet(channel, channel->rx_pkt);
	channel->rx_pkt = rx_buf;
}

static void efx_rx_deliver(struct efx_channel *channel,
			   struct efx_rx_buffer *rx_buf)
{
	struct sk_buff *skb;

	/* We now own the SKB */
	skb = rx_buf->u.skb;
	rx_buf->u.skb = NULL;

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);

	/* Pass the packet up */
	netif_receive_skb(skb);

	/* Update allocation strategy method */
	channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
}

/* Handle a received packet. Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf)
{
	struct efx_nic *efx = channel->efx;
	u8 *eh = efx_rx_buf_eh(efx, rx_buf);

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		efx_loopback_rx_packet(efx, eh, rx_buf->len);
		efx_free_rx_buffer(efx, rx_buf);
		return;
	}

	if (!(rx_buf->flags & EFX_RX_BUF_PAGE)) {
		struct sk_buff *skb = rx_buf->u.skb;

		prefetch(skb_shinfo(skb));

		skb_reserve(skb, efx->type->rx_buffer_hash_size);
		skb_put(skb, rx_buf->len);

		if (efx->net_dev->features & NETIF_F_RXHASH)
			skb->rxhash = efx_rx_buf_hash(eh);

		/* Move past the ethernet header. rx_buf->data still points
		 * at the ethernet header */
		skb->protocol = eth_type_trans(skb, efx->net_dev);

		skb_record_rx_queue(skb, channel->channel);
	}

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

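	/* Pages can always be handed to GRO; skbs only if the hardware
	 * checksum was good (see efx_rx_packet_gro()).
	 */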
	if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)))
		efx_rx_packet_gro(channel, rx_buf, eh);
	else
		efx_rx_deliver(channel, rx_buf);
}

void efx_rx_strategy(struct efx_channel *channel)
{
	enum efx_rx_alloc_method method = rx_alloc_method;

	/* Only makes sense to use page based allocation if GRO is enabled */
	if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
		method = RX_ALLOC_METHOD_SKB;
	} else if (method == RX_ALLOC_METHOD_AUTO) {
		/* Constrain the rx_alloc_level */
		if (channel->rx_alloc_level < 0)
			channel->rx_alloc_level = 0;
		else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
			channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;

		/* Decide on the allocation method */
		method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_GRO) ?
			  RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
	}

	/* Push the option */
	channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}
	return rc;
}

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int max_fill, trigger, max_trigger;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	max_trigger = max_fill - EFX_RX_BATCH;
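	/* rx_refill_threshold is a percentage of max_fill; when it is
	 * unset, refill whenever the queue drops more than one batch
	 * below max_fill.
	 */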
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;

	/* Set up RX descriptor ring */
	rx_queue->enabled = true;
	efx_nic_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	int i;
	struct efx_rx_buffer *rx_buf;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* A flush failure might have left rx_queue->enabled */
	rx_queue->enabled = false;

	del_timer_sync(&rx_queue->slow_fill);
	efx_nic_fini_rx(rx_queue);

	/* Release RX buffers. NB: start at index 0, not the current HW ptr */
	if (rx_queue->buffer) {
		for (i = 0; i <= rx_queue->ptr_mask; i++) {
			rx_buf = efx_rx_buffer(rx_queue, i);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}


module_param(rx_alloc_method, int, 0644);
MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");