1 | /* |
2 | * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net> | |
3 | * | |
4 | * This file is free software: you may copy, redistribute and/or modify it | |
5 | * under the terms of the GNU General Public License as published by the | |
6 | * Free Software Foundation, either version 2 of the License, or (at your | |
7 | * option) any later version. | |
8 | * | |
9 | * This file is distributed in the hope that it will be useful, but | |
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
12 | * General Public License for more details. | |
13 | * | |
14 | * You should have received a copy of the GNU General Public License | |
15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | |
16 | * | |
17 | * This file incorporates work covered by the following copyright and | |
18 | * permission notice: | |
19 | * | |
20 | * Copyright (c) 2012 Qualcomm Atheros, Inc. | |
21 | * | |
22 | * Permission to use, copy, modify, and/or distribute this software for any | |
23 | * purpose with or without fee is hereby granted, provided that the above | |
24 | * copyright notice and this permission notice appear in all copies. | |
25 | * | |
26 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | |
27 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | |
28 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | |
29 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | |
30 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | |
31 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | |
32 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | |
33 | */ | |
34 | ||
35 | #include <linux/module.h> | |
36 | #include <linux/pci.h> | |
37 | #include <linux/interrupt.h> | |
38 | #include <linux/ip.h> | |
39 | #include <linux/ipv6.h> | |
40 | #include <linux/if_vlan.h> | |
41 | #include <linux/mdio.h> | |
42 | #include <linux/aer.h> | |
43 | #include <linux/bitops.h> | |
44 | #include <linux/netdevice.h> | |
45 | #include <linux/etherdevice.h> | |
46 | #include <net/ip6_checksum.h> | |
47 | #include <linux/crc32.h> | |
48 | #include "alx.h" | |
49 | #include "hw.h" | |
50 | #include "reg.h" | |
51 | ||
52 | const char alx_drv_name[] = "alx"; | |
53 | ||
54 | ||
55 | static void alx_free_txbuf(struct alx_priv *alx, int entry) | |
56 | { | |
57 | struct alx_buffer *txb = &alx->txq.bufs[entry]; | |
58 | ||
59 | if (dma_unmap_len(txb, size)) { | |
60 | dma_unmap_single(&alx->hw.pdev->dev, | |
61 | dma_unmap_addr(txb, dma), | |
62 | dma_unmap_len(txb, size), | |
63 | DMA_TO_DEVICE); | |
64 | dma_unmap_len_set(txb, size, 0); | |
65 | } | |
66 | ||
67 | if (txb->skb) { | |
68 | dev_kfree_skb_any(txb->skb); | |
69 | txb->skb = NULL; | |
70 | } | |
71 | } | |
72 | ||
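| /* Note: refill the RX free-descriptor (RFD) ring with fresh skbs. | |
|  * write_idx chases read_idx and one slot is always left empty, so a | |
|  * full ring can be told apart from an empty one; the hardware producer | |
|  * index (ALX_RFD_PIDX) is only updated once, after a wmb(). | |
|  */ | |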
73 | static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp) | |
74 | { | |
75 | struct alx_rx_queue *rxq = &alx->rxq; | |
76 | struct sk_buff *skb; | |
77 | struct alx_buffer *cur_buf; | |
78 | dma_addr_t dma; | |
79 | u16 cur, next, count = 0; | |
80 | ||
81 | next = cur = rxq->write_idx; | |
82 | if (++next == alx->rx_ringsz) | |
83 | next = 0; | |
84 | cur_buf = &rxq->bufs[cur]; | |
85 | ||
86 | while (!cur_buf->skb && next != rxq->read_idx) { | |
87 | struct alx_rfd *rfd = &rxq->rfd[cur]; | |
88 | ||
89 | skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp); | |
90 | if (!skb) |
91 | break; | |
92 | ||
93 | /* Workaround for the HW RX DMA overflow issue */ | |
94 | if (((unsigned long)skb->data & 0xfff) == 0xfc0) | |
95 | skb_reserve(skb, 64); | |
96 | ||
97 | dma = dma_map_single(&alx->hw.pdev->dev, |
98 | skb->data, alx->rxbuf_size, | |
99 | DMA_FROM_DEVICE); | |
100 | if (dma_mapping_error(&alx->hw.pdev->dev, dma)) { | |
101 | dev_kfree_skb(skb); | |
102 | break; | |
103 | } | |
104 | ||
105 | /* Unfortunately, RX descriptor buffers must be 4-byte | |
106 | * aligned, so we can't use IP alignment. | |
107 | */ | |
108 | if (WARN_ON(dma & 3)) { | |
109 | dev_kfree_skb(skb); | |
110 | break; | |
111 | } | |
112 | ||
113 | cur_buf->skb = skb; | |
114 | dma_unmap_len_set(cur_buf, size, alx->rxbuf_size); | |
115 | dma_unmap_addr_set(cur_buf, dma, dma); | |
116 | rfd->addr = cpu_to_le64(dma); | |
117 | ||
118 | cur = next; | |
119 | if (++next == alx->rx_ringsz) | |
120 | next = 0; | |
121 | cur_buf = &rxq->bufs[cur]; | |
122 | count++; | |
123 | } | |
124 | ||
125 | if (count) { | |
126 | /* flush all updates before updating hardware */ | |
127 | wmb(); | |
128 | rxq->write_idx = cur; | |
129 | alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur); | |
130 | } | |
131 | ||
132 | return count; | |
133 | } | |
134 | ||
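| /* Note: number of free TX descriptors (TPDs), accounting for ring | |
|  * wraparound; one descriptor is always kept unused so that write_idx == | |
|  * read_idx unambiguously means "empty". | |
|  */ | |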
135 | static inline int alx_tpd_avail(struct alx_priv *alx) | |
136 | { | |
137 | struct alx_tx_queue *txq = &alx->txq; | |
138 | ||
139 | if (txq->write_idx >= txq->read_idx) | |
140 | return alx->tx_ringsz + txq->read_idx - txq->write_idx - 1; | |
141 | return txq->read_idx - txq->write_idx - 1; | |
142 | } | |
143 | ||
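| /* Note: reclaim TPDs the hardware has consumed, up to the chip's | |
|  * consumer index (ALX_TPD_PRI0_CIDX), report completed bytes/packets | |
|  * for byte queue limits, and wake the queue once at least a quarter of | |
|  * the ring is free. Returns true when the TX ring is fully drained. | |
|  */ | |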
144 | static bool alx_clean_tx_irq(struct alx_priv *alx) | |
145 | { | |
146 | struct alx_tx_queue *txq = &alx->txq; | |
147 | u16 hw_read_idx, sw_read_idx; | |
148 | unsigned int total_bytes = 0, total_packets = 0; | |
149 | int budget = ALX_DEFAULT_TX_WORK; | |
150 | ||
151 | sw_read_idx = txq->read_idx; | |
152 | hw_read_idx = alx_read_mem16(&alx->hw, ALX_TPD_PRI0_CIDX); | |
153 | ||
154 | if (sw_read_idx != hw_read_idx) { | |
155 | while (sw_read_idx != hw_read_idx && budget > 0) { | |
156 | struct sk_buff *skb; | |
157 | ||
158 | skb = txq->bufs[sw_read_idx].skb; | |
159 | if (skb) { | |
160 | total_bytes += skb->len; | |
161 | total_packets++; | |
162 | budget--; | |
163 | } | |
164 | ||
165 | alx_free_txbuf(alx, sw_read_idx); | |
166 | ||
167 | if (++sw_read_idx == alx->tx_ringsz) | |
168 | sw_read_idx = 0; | |
169 | } | |
170 | txq->read_idx = sw_read_idx; | |
171 | ||
172 | netdev_completed_queue(alx->dev, total_packets, total_bytes); | |
173 | } | |
174 | ||
175 | if (netif_queue_stopped(alx->dev) && netif_carrier_ok(alx->dev) && | |
176 | alx_tpd_avail(alx) > alx->tx_ringsz/4) | |
177 | netif_wake_queue(alx->dev); | |
178 | ||
179 | return sw_read_idx == hw_read_idx; | |
180 | } | |
181 | ||
182 | static void alx_schedule_link_check(struct alx_priv *alx) | |
183 | { | |
184 | schedule_work(&alx->link_check_wk); | |
185 | } | |
186 | ||
187 | static void alx_schedule_reset(struct alx_priv *alx) | |
188 | { | |
189 | schedule_work(&alx->reset_wk); | |
190 | } | |
191 | ||
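| /* Note: consume RX return descriptors (RRDs) until the NAPI budget is | |
|  * exhausted. Each RRD is expected to reference exactly one RFD | |
|  * (RRD_NOR == 1) at the expected index; anything else is treated as a | |
|  * confused chip and escalated to a reset. RFDs are refilled in batches | |
|  * of ALX_RX_ALLOC_THRESH. | |
|  */ | |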
192 | static int alx_clean_rx_irq(struct alx_priv *alx, int budget) | |
193 | { |
194 | struct alx_rx_queue *rxq = &alx->rxq; | |
195 | struct alx_rrd *rrd; | |
196 | struct alx_buffer *rxb; | |
197 | struct sk_buff *skb; | |
198 | u16 length, rfd_cleaned = 0; | |
199 | int work = 0; | |
200 | ||
201 | while (work < budget) { | |
202 | rrd = &rxq->rrd[rxq->rrd_read_idx]; |
203 | if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT))) | |
204 | break; | |
205 | rrd->word3 &= ~cpu_to_le32(1 << RRD_UPDATED_SHIFT); | |
206 | ||
207 | if (ALX_GET_FIELD(le32_to_cpu(rrd->word0), | |
208 | RRD_SI) != rxq->read_idx || | |
209 | ALX_GET_FIELD(le32_to_cpu(rrd->word0), | |
210 | RRD_NOR) != 1) { | |
211 | alx_schedule_reset(alx); | |
212 | return work; | |
213 | } |
214 | ||
215 | rxb = &rxq->bufs[rxq->read_idx]; | |
216 | dma_unmap_single(&alx->hw.pdev->dev, | |
217 | dma_unmap_addr(rxb, dma), | |
218 | dma_unmap_len(rxb, size), | |
219 | DMA_FROM_DEVICE); | |
220 | dma_unmap_len_set(rxb, size, 0); | |
221 | skb = rxb->skb; | |
222 | rxb->skb = NULL; | |
223 | ||
224 | if (rrd->word3 & cpu_to_le32(1 << RRD_ERR_RES_SHIFT) || | |
225 | rrd->word3 & cpu_to_le32(1 << RRD_ERR_LEN_SHIFT)) { | |
226 | rrd->word3 = 0; | |
227 | dev_kfree_skb_any(skb); | |
228 | goto next_pkt; | |
229 | } | |
230 | ||
231 | length = ALX_GET_FIELD(le32_to_cpu(rrd->word3), | |
232 | RRD_PKTLEN) - ETH_FCS_LEN; | |
233 | skb_put(skb, length); | |
234 | skb->protocol = eth_type_trans(skb, alx->dev); | |
235 | ||
236 | skb_checksum_none_assert(skb); | |
237 | if (alx->dev->features & NETIF_F_RXCSUM && | |
238 | !(rrd->word3 & (cpu_to_le32(1 << RRD_ERR_L4_SHIFT) | | |
239 | cpu_to_le32(1 << RRD_ERR_IPV4_SHIFT)))) { | |
240 | switch (ALX_GET_FIELD(le32_to_cpu(rrd->word2), | |
241 | RRD_PID)) { | |
242 | case RRD_PID_IPV6UDP: | |
243 | case RRD_PID_IPV4UDP: | |
244 | case RRD_PID_IPV4TCP: | |
245 | case RRD_PID_IPV6TCP: | |
246 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
247 | break; | |
248 | } | |
249 | } | |
250 | ||
251 | napi_gro_receive(&alx->napi, skb); | |
252 | work++; | |
253 | ||
254 | next_pkt: | |
255 | if (++rxq->read_idx == alx->rx_ringsz) | |
256 | rxq->read_idx = 0; | |
257 | if (++rxq->rrd_read_idx == alx->rx_ringsz) | |
258 | rxq->rrd_read_idx = 0; | |
259 | ||
260 | if (++rfd_cleaned > ALX_RX_ALLOC_THRESH) | |
261 | rfd_cleaned -= alx_refill_rx_ring(alx, GFP_ATOMIC); | |
262 | } | |
263 | ||
264 | if (rfd_cleaned) | |
265 | alx_refill_rx_ring(alx, GFP_ATOMIC); | |
266 | ||
267 | return work; | |
268 | } |
269 | ||
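| /* Note: NAPI poll callback. Per the NAPI contract it returns the full | |
|  * budget (without calling napi_complete) while TX or RX work remains, | |
|  * and re-enables the queue interrupts in ALX_IMR only once all work is | |
|  * done. | |
|  */ | |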
270 | static int alx_poll(struct napi_struct *napi, int budget) | |
271 | { | |
272 | struct alx_priv *alx = container_of(napi, struct alx_priv, napi); | |
273 | struct alx_hw *hw = &alx->hw; | |
274 | unsigned long flags; | |
275 | bool tx_complete; | |
276 | int work; | |
277 | ||
278 | tx_complete = alx_clean_tx_irq(alx); | |
279 | work = alx_clean_rx_irq(alx, budget); | |
280 | ||
281 | if (!tx_complete || work == budget) | |
282 | return budget; | |
283 | |
284 | napi_complete(&alx->napi); | |
285 | ||
286 | /* enable interrupt */ | |
287 | spin_lock_irqsave(&alx->irq_lock, flags); | |
288 | alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0; | |
289 | alx_write_mem32(hw, ALX_IMR, alx->int_mask); | |
290 | spin_unlock_irqrestore(&alx->irq_lock, flags); | |
291 | ||
292 | alx_post_write(hw); | |
293 | ||
294 | return work; | |
295 | } |
296 | ||
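| /* Note: common interrupt body for the MSI and legacy handlers. Writing | |
|  * intr | ALX_ISR_DIS to ALX_ISR acks the pending sources while keeping | |
|  * interrupt generation disabled; the trailing write of 0 re-enables it. | |
|  * That trailing write is deliberately skipped on the fatal path, where | |
|  * the reset worker takes over. | |
|  */ | |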
297 | static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr) | |
298 | { | |
299 | struct alx_hw *hw = &alx->hw; | |
300 | bool write_int_mask = false; | |
301 | ||
302 | spin_lock(&alx->irq_lock); | |
303 | ||
304 | /* ACK interrupt */ | |
305 | alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS); | |
306 | intr &= alx->int_mask; | |
307 | ||
308 | if (intr & ALX_ISR_FATAL) { | |
309 | netif_warn(alx, hw, alx->dev, | |
310 | "fatal interrupt 0x%x, resetting\n", intr); | |
311 | alx_schedule_reset(alx); | |
312 | goto out; | |
313 | } | |
314 | ||
315 | if (intr & ALX_ISR_ALERT) | |
316 | netdev_warn(alx->dev, "alert interrupt: 0x%x\n", intr); | |
317 | ||
318 | if (intr & ALX_ISR_PHY) { | |
319 | /* Suppress the PHY interrupt: its source is inside the PHY, so | |
320 |  * the interrupt status cannot be cleared until the PHY's internal | |
321 |  * status is cleared (done from the link check worker). | |
322 |  */ | |
323 | alx->int_mask &= ~ALX_ISR_PHY; | |
324 | write_int_mask = true; | |
325 | alx_schedule_link_check(alx); | |
326 | } | |
327 | ||
328 | if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) { | |
329 | napi_schedule(&alx->napi); | |
330 | /* mask RX/TX interrupts; they are re-enabled when NAPI completes */ | |
331 | alx->int_mask &= ~ALX_ISR_ALL_QUEUES; | |
332 | write_int_mask = true; | |
333 | } | |
334 | ||
335 | if (write_int_mask) | |
336 | alx_write_mem32(hw, ALX_IMR, alx->int_mask); | |
337 | ||
338 | alx_write_mem32(hw, ALX_ISR, 0); | |
339 | ||
340 | out: | |
341 | spin_unlock(&alx->irq_lock); | |
342 | return IRQ_HANDLED; | |
343 | } | |
344 | ||
345 | static irqreturn_t alx_intr_msi(int irq, void *data) | |
346 | { | |
347 | struct alx_priv *alx = data; | |
348 | ||
349 | return alx_intr_handle(alx, alx_read_mem32(&alx->hw, ALX_ISR)); | |
350 | } | |
351 | ||
352 | static irqreturn_t alx_intr_legacy(int irq, void *data) | |
353 | { | |
354 | struct alx_priv *alx = data; | |
355 | struct alx_hw *hw = &alx->hw; | |
356 | u32 intr; | |
357 | ||
358 | intr = alx_read_mem32(hw, ALX_ISR); | |
359 | ||
360 | if (intr & ALX_ISR_DIS || !(intr & alx->int_mask)) | |
361 | return IRQ_NONE; | |
362 | ||
363 | return alx_intr_handle(alx, intr); | |
364 | } | |
365 | ||
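| /* Note: program ring sizes and base addresses into the chip and make | |
|  * it latch them via the ALX_SRAM9 load. Only the low 32 address bits | |
|  * are per-ring; the high 32 bits are shared, which is why all | |
|  * descriptors live in the single chunk allocated in | |
|  * alx_alloc_descriptors(). | |
|  */ | |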
366 | static void alx_init_ring_ptrs(struct alx_priv *alx) | |
367 | { | |
368 | struct alx_hw *hw = &alx->hw; | |
369 | u32 addr_hi = ((u64)alx->descmem.dma) >> 32; | |
370 | ||
371 | alx->rxq.read_idx = 0; | |
372 | alx->rxq.write_idx = 0; | |
373 | alx->rxq.rrd_read_idx = 0; | |
374 | alx_write_mem32(hw, ALX_RX_BASE_ADDR_HI, addr_hi); | |
375 | alx_write_mem32(hw, ALX_RRD_ADDR_LO, alx->rxq.rrd_dma); | |
376 | alx_write_mem32(hw, ALX_RRD_RING_SZ, alx->rx_ringsz); | |
377 | alx_write_mem32(hw, ALX_RFD_ADDR_LO, alx->rxq.rfd_dma); | |
378 | alx_write_mem32(hw, ALX_RFD_RING_SZ, alx->rx_ringsz); | |
379 | alx_write_mem32(hw, ALX_RFD_BUF_SZ, alx->rxbuf_size); | |
380 | ||
381 | alx->txq.read_idx = 0; | |
382 | alx->txq.write_idx = 0; | |
383 | alx_write_mem32(hw, ALX_TX_BASE_ADDR_HI, addr_hi); | |
384 | alx_write_mem32(hw, ALX_TPD_PRI0_ADDR_LO, alx->txq.tpd_dma); | |
385 | alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz); | |
386 | ||
387 | /* load these pointers into the chip */ | |
388 | alx_write_mem32(hw, ALX_SRAM9, ALX_SRAM_LOAD_PTR); | |
389 | } | |
390 | ||
391 | static void alx_free_txring_buf(struct alx_priv *alx) | |
392 | { | |
393 | struct alx_tx_queue *txq = &alx->txq; | |
394 | int i; | |
395 | ||
396 | if (!txq->bufs) | |
397 | return; | |
398 | ||
399 | for (i = 0; i < alx->tx_ringsz; i++) | |
400 | alx_free_txbuf(alx, i); | |
401 | ||
402 | memset(txq->bufs, 0, alx->tx_ringsz * sizeof(struct alx_buffer)); | |
403 | memset(txq->tpd, 0, alx->tx_ringsz * sizeof(struct alx_txd)); | |
404 | txq->write_idx = 0; | |
405 | txq->read_idx = 0; | |
406 | ||
407 | netdev_reset_queue(alx->dev); | |
408 | } | |
409 | ||
410 | static void alx_free_rxring_buf(struct alx_priv *alx) | |
411 | { | |
412 | struct alx_rx_queue *rxq = &alx->rxq; | |
413 | struct alx_buffer *cur_buf; | |
414 | u16 i; | |
415 | ||
416 | if (rxq == NULL) | |
417 | return; | |
418 | ||
419 | for (i = 0; i < alx->rx_ringsz; i++) { | |
420 | cur_buf = rxq->bufs + i; | |
421 | if (cur_buf->skb) { | |
422 | dma_unmap_single(&alx->hw.pdev->dev, | |
423 | dma_unmap_addr(cur_buf, dma), | |
424 | dma_unmap_len(cur_buf, size), | |
425 | DMA_FROM_DEVICE); | |
426 | dev_kfree_skb(cur_buf->skb); | |
427 | cur_buf->skb = NULL; | |
428 | dma_unmap_len_set(cur_buf, size, 0); | |
429 | dma_unmap_addr_set(cur_buf, dma, 0); | |
430 | } | |
431 | } | |
432 | ||
433 | rxq->write_idx = 0; | |
434 | rxq->read_idx = 0; | |
435 | rxq->rrd_read_idx = 0; | |
436 | } | |
437 | ||
438 | static void alx_free_buffers(struct alx_priv *alx) | |
439 | { | |
440 | alx_free_txring_buf(alx); | |
441 | alx_free_rxring_buf(alx); | |
442 | } | |
443 | ||
444 | static int alx_reinit_rings(struct alx_priv *alx) | |
445 | { | |
446 | alx_free_buffers(alx); | |
447 | ||
448 | alx_init_ring_ptrs(alx); | |
449 | ||
450 | if (!alx_refill_rx_ring(alx, GFP_KERNEL)) | |
451 | return -ENOMEM; | |
452 | ||
453 | return 0; | |
454 | } | |
455 | ||
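| /* Note: classic CRC-based multicast filter hash: bit 31 of the CRC | |
|  * selects one of the two 32-bit hash-table registers and bits 30..26 | |
|  * select the bit within it. | |
|  */ | |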
456 | static void alx_add_mc_addr(struct alx_hw *hw, const u8 *addr, u32 *mc_hash) | |
457 | { | |
458 | u32 crc32, bit, reg; | |
459 | ||
460 | crc32 = ether_crc(ETH_ALEN, addr); | |
461 | reg = (crc32 >> 31) & 0x1; | |
462 | bit = (crc32 >> 26) & 0x1F; | |
463 | ||
464 | mc_hash[reg] |= BIT(bit); | |
465 | } | |
466 | ||
467 | static void __alx_set_rx_mode(struct net_device *netdev) | |
468 | { | |
469 | struct alx_priv *alx = netdev_priv(netdev); | |
470 | struct alx_hw *hw = &alx->hw; | |
471 | struct netdev_hw_addr *ha; | |
472 | u32 mc_hash[2] = {}; | |
473 | ||
474 | if (!(netdev->flags & IFF_ALLMULTI)) { | |
475 | netdev_for_each_mc_addr(ha, netdev) | |
476 | alx_add_mc_addr(hw, ha->addr, mc_hash); | |
477 | ||
478 | alx_write_mem32(hw, ALX_HASH_TBL0, mc_hash[0]); | |
479 | alx_write_mem32(hw, ALX_HASH_TBL1, mc_hash[1]); | |
480 | } | |
481 | ||
482 | hw->rx_ctrl &= ~(ALX_MAC_CTRL_MULTIALL_EN | ALX_MAC_CTRL_PROMISC_EN); | |
483 | if (netdev->flags & IFF_PROMISC) | |
484 | hw->rx_ctrl |= ALX_MAC_CTRL_PROMISC_EN; | |
485 | if (netdev->flags & IFF_ALLMULTI) | |
486 | hw->rx_ctrl |= ALX_MAC_CTRL_MULTIALL_EN; | |
487 | ||
488 | alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl); | |
489 | } | |
490 | ||
491 | static void alx_set_rx_mode(struct net_device *netdev) | |
492 | { | |
493 | __alx_set_rx_mode(netdev); | |
494 | } | |
495 | ||
496 | static int alx_set_mac_address(struct net_device *netdev, void *data) | |
497 | { | |
498 | struct alx_priv *alx = netdev_priv(netdev); | |
499 | struct alx_hw *hw = &alx->hw; | |
500 | struct sockaddr *addr = data; | |
501 | ||
502 | if (!is_valid_ether_addr(addr->sa_data)) | |
503 | return -EADDRNOTAVAIL; | |
504 | ||
505 | if (netdev->addr_assign_type & NET_ADDR_RANDOM) | |
506 | netdev->addr_assign_type ^= NET_ADDR_RANDOM; | |
507 | ||
508 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | |
509 | memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); | |
510 | alx_set_macaddr(hw, hw->mac_addr); | |
511 | ||
512 | return 0; | |
513 | } | |
514 | ||
515 | static int alx_alloc_descriptors(struct alx_priv *alx) | |
516 | { | |
517 | alx->txq.bufs = kcalloc(alx->tx_ringsz, | |
518 | sizeof(struct alx_buffer), | |
519 | GFP_KERNEL); | |
520 | if (!alx->txq.bufs) | |
521 | return -ENOMEM; | |
522 | ||
523 | alx->rxq.bufs = kcalloc(alx->rx_ringsz, | |
524 | sizeof(struct alx_buffer), | |
525 | GFP_KERNEL); | |
526 | if (!alx->rxq.bufs) | |
527 | goto out_free; | |
528 | ||
529 | /* physical tx/rx ring descriptors | |
530 | * | |
531 | * Allocate them as a single chunk because they must not cross a | |
532 | * 4G boundary (hardware has a single register for high 32 bits | |
533 | * of addresses only) | |
534 | */ | |
535 | alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz + | |
536 | sizeof(struct alx_rrd) * alx->rx_ringsz + | |
537 | sizeof(struct alx_rfd) * alx->rx_ringsz; | |
538 | alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev, | |
539 | alx->descmem.size, | |
540 | &alx->descmem.dma, | |
541 | GFP_KERNEL); | |
542 | if (!alx->descmem.virt) | |
543 | goto out_free; | |
544 | ||
545 | alx->txq.tpd = (void *)alx->descmem.virt; | |
546 | alx->txq.tpd_dma = alx->descmem.dma; | |
547 | ||
548 | /* alignment requirement for next block */ | |
549 | BUILD_BUG_ON(sizeof(struct alx_txd) % 8); | |
550 | ||
551 | alx->rxq.rrd = | |
552 | (void *)((u8 *)alx->descmem.virt + | |
553 | sizeof(struct alx_txd) * alx->tx_ringsz); | |
554 | alx->rxq.rrd_dma = alx->descmem.dma + | |
555 | sizeof(struct alx_txd) * alx->tx_ringsz; | |
556 | ||
557 | /* alignment requirement for next block */ | |
558 | BUILD_BUG_ON(sizeof(struct alx_rrd) % 8); | |
559 | ||
560 | alx->rxq.rfd = | |
561 | (void *)((u8 *)alx->descmem.virt + | |
562 | sizeof(struct alx_txd) * alx->tx_ringsz + | |
563 | sizeof(struct alx_rrd) * alx->rx_ringsz); | |
564 | alx->rxq.rfd_dma = alx->descmem.dma + | |
565 | sizeof(struct alx_txd) * alx->tx_ringsz + | |
566 | sizeof(struct alx_rrd) * alx->rx_ringsz; | |
567 | ||
568 | return 0; | |
569 | out_free: | |
570 | kfree(alx->txq.bufs); | |
571 | kfree(alx->rxq.bufs); | |
572 | return -ENOMEM; | |
573 | } | |
574 | ||
575 | static int alx_alloc_rings(struct alx_priv *alx) | |
576 | { | |
577 | int err; | |
578 | ||
579 | err = alx_alloc_descriptors(alx); | |
580 | if (err) | |
581 | return err; | |
582 | ||
583 | alx->int_mask &= ~ALX_ISR_ALL_QUEUES; | |
584 | alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0; | |
585 | ||
586 | ||
587 | netif_napi_add(alx->dev, &alx->napi, alx_poll, 64); | |
588 | ||
589 | alx_reinit_rings(alx); | |
590 | return 0; | |
591 | } | |
592 | ||
593 | static void alx_free_rings(struct alx_priv *alx) | |
594 | { | |
595 | netif_napi_del(&alx->napi); | |
596 | alx_free_buffers(alx); | |
597 | ||
598 | kfree(alx->txq.bufs); | |
599 | kfree(alx->rxq.bufs); | |
600 | ||
601 | dma_free_coherent(&alx->hw.pdev->dev, | |
602 | alx->descmem.size, | |
603 | alx->descmem.virt, | |
604 | alx->descmem.dma); | |
605 | } | |
606 | ||
607 | static void alx_config_vector_mapping(struct alx_priv *alx) | |
608 | { | |
609 | struct alx_hw *hw = &alx->hw; | |
610 | ||
611 | alx_write_mem32(hw, ALX_MSI_MAP_TBL1, 0); | |
612 | alx_write_mem32(hw, ALX_MSI_MAP_TBL2, 0); | |
613 | alx_write_mem32(hw, ALX_MSI_ID_MAP, 0); | |
614 | } | |
615 | ||
616 | static void alx_irq_enable(struct alx_priv *alx) | |
617 | { | |
618 | struct alx_hw *hw = &alx->hw; | |
619 | ||
620 | /* level-1 interrupt switch */ | |
621 | alx_write_mem32(hw, ALX_ISR, 0); | |
622 | alx_write_mem32(hw, ALX_IMR, alx->int_mask); | |
623 | alx_post_write(hw); | |
624 | } | |
625 | ||
626 | static void alx_irq_disable(struct alx_priv *alx) | |
627 | { | |
628 | struct alx_hw *hw = &alx->hw; | |
629 | ||
630 | alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS); | |
631 | alx_write_mem32(hw, ALX_IMR, 0); | |
632 | alx_post_write(hw); | |
633 | ||
634 | synchronize_irq(alx->hw.pdev->irq); | |
635 | } | |
636 | ||
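| /* Note: prefer MSI, falling back to a shared legacy IRQ if MSI cannot | |
|  * be enabled or request_irq() fails; the MSI retransmission timer is | |
|  * derived from the interrupt-moderation value hw->imt. | |
|  */ | |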
637 | static int alx_request_irq(struct alx_priv *alx) | |
638 | { | |
639 | struct pci_dev *pdev = alx->hw.pdev; | |
640 | struct alx_hw *hw = &alx->hw; | |
641 | int err; | |
642 | u32 msi_ctrl; | |
643 | ||
644 | msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT; | |
645 | ||
646 | if (!pci_enable_msi(alx->hw.pdev)) { | |
647 | alx->msi = true; | |
648 | ||
649 | alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, | |
650 | msi_ctrl | ALX_MSI_MASK_SEL_LINE); | |
651 | err = request_irq(pdev->irq, alx_intr_msi, 0, | |
652 | alx->dev->name, alx); | |
653 | if (!err) | |
654 | goto out; | |
655 | /* fall back to legacy interrupt */ | |
656 | pci_disable_msi(alx->hw.pdev); | |
657 | } | |
658 | ||
659 | alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, 0); | |
660 | err = request_irq(pdev->irq, alx_intr_legacy, IRQF_SHARED, | |
661 | alx->dev->name, alx); | |
662 | out: | |
663 | if (!err) | |
664 | alx_config_vector_mapping(alx); | |
665 | return err; | |
666 | } | |
667 | ||
668 | static void alx_free_irq(struct alx_priv *alx) | |
669 | { | |
670 | struct pci_dev *pdev = alx->hw.pdev; | |
671 | ||
672 | free_irq(pdev->irq, alx); | |
673 | ||
674 | if (alx->msi) { | |
675 | pci_disable_msi(alx->hw.pdev); | |
676 | alx->msi = false; | |
677 | } | |
678 | } | |
679 | ||
680 | static int alx_identify_hw(struct alx_priv *alx) | |
681 | { | |
682 | struct alx_hw *hw = &alx->hw; | |
683 | int rev = alx_hw_revision(hw); | |
684 | ||
685 | if (rev > ALX_REV_C0) | |
686 | return -EINVAL; | |
687 | ||
688 | hw->max_dma_chnl = rev >= ALX_REV_B0 ? 4 : 2; | |
689 | ||
690 | return 0; | |
691 | } | |
692 | ||
693 | static int alx_init_sw(struct alx_priv *alx) | |
694 | { | |
695 | struct pci_dev *pdev = alx->hw.pdev; | |
696 | struct alx_hw *hw = &alx->hw; | |
697 | int err; | |
698 | ||
699 | err = alx_identify_hw(alx); | |
700 | if (err) { | |
701 | dev_err(&pdev->dev, "unrecognized chip, aborting\n"); | |
702 | return err; | |
703 | } | |
704 | ||
705 | alx->hw.lnk_patch = | |
706 | pdev->device == ALX_DEV_ID_AR8161 && | |
707 | pdev->subsystem_vendor == PCI_VENDOR_ID_ATTANSIC && | |
708 | pdev->subsystem_device == 0x0091 && | |
709 | pdev->revision == 0; | |
710 | ||
711 | hw->smb_timer = 400; | |
712 | hw->mtu = alx->dev->mtu; | |
713 | alx->rxbuf_size = ALIGN(ALX_RAW_MTU(hw->mtu), 8); | |
714 | alx->tx_ringsz = 256; | |
715 | alx->rx_ringsz = 512; | |
716 | hw->sleep_ctrl = ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_WOL_PHY; | |
717 | hw->imt = 200; | |
718 | alx->int_mask = ALX_ISR_MISC; | |
719 | hw->dma_chnl = hw->max_dma_chnl; | |
720 | hw->ith_tpd = alx->tx_ringsz / 3; | |
721 | hw->link_speed = SPEED_UNKNOWN; | |
722 | hw->adv_cfg = ADVERTISED_Autoneg | | |
723 | ADVERTISED_10baseT_Half | | |
724 | ADVERTISED_10baseT_Full | | |
725 | ADVERTISED_100baseT_Full | | |
726 | ADVERTISED_100baseT_Half | | |
727 | ADVERTISED_1000baseT_Full; | |
728 | hw->flowctrl = ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX; | |
729 | ||
730 | hw->rx_ctrl = ALX_MAC_CTRL_WOLSPED_SWEN | | |
731 | ALX_MAC_CTRL_MHASH_ALG_HI5B | | |
732 | ALX_MAC_CTRL_BRD_EN | | |
733 | ALX_MAC_CTRL_PCRCE | | |
734 | ALX_MAC_CTRL_CRCE | | |
735 | ALX_MAC_CTRL_RXFC_EN | | |
736 | ALX_MAC_CTRL_TXFC_EN | | |
737 | 7 << ALX_MAC_CTRL_PRMBLEN_SHIFT; | |
738 | ||
739 | return err; | |
740 | } | |
741 | ||
742 | ||
743 | static netdev_features_t alx_fix_features(struct net_device *netdev, | |
744 | netdev_features_t features) | |
745 | { | |
746 | if (netdev->mtu > ALX_MAX_TSO_PKT_SIZE) | |
747 | features &= ~(NETIF_F_TSO | NETIF_F_TSO6); | |
748 | ||
749 | return features; | |
750 | } | |
751 | ||
752 | static void alx_netif_stop(struct alx_priv *alx) | |
753 | { | |
754 | alx->dev->trans_start = jiffies; | |
755 | if (netif_carrier_ok(alx->dev)) { | |
756 | netif_carrier_off(alx->dev); | |
757 | netif_tx_disable(alx->dev); | |
758 | napi_disable(&alx->napi); | |
759 | } | |
760 | } | |
761 | ||
762 | static void alx_halt(struct alx_priv *alx) | |
763 | { | |
764 | struct alx_hw *hw = &alx->hw; | |
765 | ||
766 | alx_netif_stop(alx); | |
767 | hw->link_speed = SPEED_UNKNOWN; | |
768 | ||
769 | alx_reset_mac(hw); | |
770 | ||
771 | /* disable l0s/l1 */ | |
772 | alx_enable_aspm(hw, false, false); | |
773 | alx_irq_disable(alx); | |
774 | alx_free_buffers(alx); | |
775 | } | |
776 | ||
777 | static void alx_configure(struct alx_priv *alx) | |
778 | { | |
779 | struct alx_hw *hw = &alx->hw; | |
780 | ||
781 | alx_configure_basic(hw); | |
782 | alx_disable_rss(hw); | |
783 | __alx_set_rx_mode(alx->dev); | |
784 | ||
785 | alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl); | |
786 | } | |
787 | ||
788 | static void alx_activate(struct alx_priv *alx) | |
789 | { | |
790 | /* hardware settings were lost by the halt/reset, restore them */ | |
791 | alx_reinit_rings(alx); | |
792 | alx_configure(alx); | |
793 | ||
794 | /* clear old interrupts */ | |
795 | alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS); | |
796 | ||
797 | alx_irq_enable(alx); | |
798 | ||
799 | alx_schedule_link_check(alx); | |
800 | } | |
801 | ||
802 | static void alx_reinit(struct alx_priv *alx) | |
803 | { | |
804 | ASSERT_RTNL(); | |
805 | ||
806 | alx_halt(alx); | |
807 | alx_activate(alx); | |
808 | } | |
809 | ||
810 | static int alx_change_mtu(struct net_device *netdev, int mtu) | |
811 | { | |
812 | struct alx_priv *alx = netdev_priv(netdev); | |
813 | int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; | |
814 | ||
815 | if ((max_frame < ALX_MIN_FRAME_SIZE) || | |
816 | (max_frame > ALX_MAX_FRAME_SIZE)) | |
817 | return -EINVAL; | |
818 | ||
819 | if (netdev->mtu == mtu) | |
820 | return 0; | |
821 | ||
822 | netdev->mtu = mtu; | |
823 | alx->hw.mtu = mtu; | |
824 | alx->rxbuf_size = mtu > ALX_DEF_RXBUF_SIZE ? | |
825 | ALIGN(max_frame, 8) : ALX_DEF_RXBUF_SIZE; | |
826 | netdev_update_features(netdev); | |
827 | if (netif_running(netdev)) | |
828 | alx_reinit(alx); | |
829 | return 0; | |
830 | } | |
831 | ||
832 | static void alx_netif_start(struct alx_priv *alx) | |
833 | { | |
834 | netif_tx_wake_all_queues(alx->dev); | |
835 | napi_enable(&alx->napi); | |
836 | netif_carrier_on(alx->dev); | |
837 | } | |
838 | ||
839 | static int __alx_open(struct alx_priv *alx, bool resume) | |
840 | { | |
841 | int err; | |
842 | ||
843 | if (!resume) | |
844 | netif_carrier_off(alx->dev); | |
845 | ||
846 | err = alx_alloc_rings(alx); | |
847 | if (err) | |
848 | return err; | |
849 | ||
850 | alx_configure(alx); | |
851 | ||
852 | err = alx_request_irq(alx); | |
853 | if (err) | |
854 | goto out_free_rings; | |
855 | ||
856 | /* clear old interrupts */ | |
857 | alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS); | |
858 | ||
859 | alx_irq_enable(alx); | |
860 | ||
861 | if (!resume) | |
862 | netif_tx_start_all_queues(alx->dev); | |
863 | ||
864 | alx_schedule_link_check(alx); | |
865 | return 0; | |
866 | ||
867 | out_free_rings: | |
868 | alx_free_rings(alx); | |
869 | return err; | |
870 | } | |
871 | ||
872 | static void __alx_stop(struct alx_priv *alx) | |
873 | { | |
874 | alx_halt(alx); | |
875 | alx_free_irq(alx); | |
876 | alx_free_rings(alx); | |
877 | } | |
878 | ||
879 | static const char *alx_speed_desc(u16 speed) | |
880 | { | |
881 | switch (speed) { | |
882 | case SPEED_1000 + DUPLEX_FULL: | |
883 | return "1 Gbps Full"; | |
884 | case SPEED_100 + DUPLEX_FULL: | |
885 | return "100 Mbps Full"; | |
886 | case SPEED_100 + DUPLEX_HALF: | |
887 | return "100 Mbps Half"; | |
888 | case SPEED_10 + DUPLEX_FULL: | |
889 | return "10 Mbps Full"; | |
890 | case SPEED_10 + DUPLEX_HALF: | |
891 | return "10 Mbps Half"; | |
892 | default: | |
893 | return "Unknown speed"; | |
894 | } | |
895 | } | |
896 | ||
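| /* Note: runs from the link_check worker under RTNL. On link-up the MAC | |
|  * and netif/NAPI are (re)started; on link-down the MAC is reset, which | |
|  * loses all hardware state, so the rings and MAC registers are | |
|  * reprogrammed. Any failure escalates to the reset worker. | |
|  */ | |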
897 | static void alx_check_link(struct alx_priv *alx) | |
898 | { | |
899 | struct alx_hw *hw = &alx->hw; | |
900 | unsigned long flags; | |
901 | int speed, old_speed; | |
902 | int err; | |
903 | ||
904 | /* clear PHY internal interrupt status, otherwise the main | |
905 | * interrupt status will be asserted forever | |
906 | */ | |
907 | alx_clear_phy_intr(hw); | |
908 | ||
909 | err = alx_get_phy_link(hw, &speed); | |
910 | if (err < 0) | |
911 | goto reset; | |
912 | ||
913 | spin_lock_irqsave(&alx->irq_lock, flags); | |
914 | alx->int_mask |= ALX_ISR_PHY; | |
915 | alx_write_mem32(hw, ALX_IMR, alx->int_mask); | |
916 | spin_unlock_irqrestore(&alx->irq_lock, flags); | |
917 | ||
918 | old_speed = hw->link_speed; | |
919 | ||
920 | if (old_speed == speed) | |
921 | return; | |
922 | hw->link_speed = speed; | |
923 | ||
924 | if (speed != SPEED_UNKNOWN) { | |
925 | netif_info(alx, link, alx->dev, | |
926 | "NIC Up: %s\n", alx_speed_desc(speed)); | |
927 | alx_post_phy_link(hw); | |
928 | alx_enable_aspm(hw, true, true); | |
929 | alx_start_mac(hw); | |
930 | ||
931 | if (old_speed == SPEED_UNKNOWN) | |
932 | alx_netif_start(alx); | |
933 | } else { | |
934 | /* link is now down */ | |
935 | alx_netif_stop(alx); | |
936 | netif_info(alx, link, alx->dev, "Link Down\n"); | |
937 | err = alx_reset_mac(hw); | |
938 | if (err) | |
939 | goto reset; | |
940 | alx_irq_disable(alx); | |
941 | ||
942 | /* MAC reset causes all HW settings to be lost, restore all */ | |
943 | err = alx_reinit_rings(alx); | |
944 | if (err) | |
945 | goto reset; | |
946 | alx_configure(alx); | |
947 | alx_enable_aspm(hw, false, true); | |
948 | alx_post_phy_link(hw); | |
949 | alx_irq_enable(alx); | |
950 | } | |
951 | ||
952 | return; | |
953 | ||
954 | reset: | |
955 | alx_schedule_reset(alx); | |
956 | } | |
957 | ||
958 | static int alx_open(struct net_device *netdev) | |
959 | { | |
960 | return __alx_open(netdev_priv(netdev), false); | |
961 | } | |
962 | ||
963 | static int alx_stop(struct net_device *netdev) | |
964 | { | |
965 | __alx_stop(netdev_priv(netdev)); | |
966 | return 0; | |
967 | } | |
968 | ||
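| /* Note: common power-down path shared by shutdown and suspend: detach | |
|  * and stop the interface, drop the PHY to its power-saving speed, | |
|  * configure wake-on-LAN, and report via *wol_en whether the device | |
|  * should stay armed as a wakeup source. | |
|  */ | |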
969 | static int __alx_shutdown(struct pci_dev *pdev, bool *wol_en) | |
970 | { | |
971 | struct alx_priv *alx = pci_get_drvdata(pdev); | |
972 | struct net_device *netdev = alx->dev; | |
973 | struct alx_hw *hw = &alx->hw; | |
974 | int err, speed; | |
975 | ||
976 | netif_device_detach(netdev); | |
977 | ||
978 | if (netif_running(netdev)) | |
979 | __alx_stop(alx); | |
980 | ||
981 | #ifdef CONFIG_PM_SLEEP | |
982 | err = pci_save_state(pdev); | |
983 | if (err) | |
984 | return err; | |
985 | #endif | |
986 | ||
987 | err = alx_select_powersaving_speed(hw, &speed); | |
988 | if (err) | |
989 | return err; | |
990 | err = alx_clear_phy_intr(hw); | |
991 | if (err) | |
992 | return err; | |
993 | err = alx_pre_suspend(hw, speed); | |
994 | if (err) | |
995 | return err; | |
996 | err = alx_config_wol(hw); | |
997 | if (err) | |
998 | return err; | |
999 | ||
1000 | *wol_en = false; | |
1001 | if (hw->sleep_ctrl & ALX_SLEEP_ACTIVE) { | |
1002 | netif_info(alx, wol, netdev, | |
1003 | "wol: ctrl=%X, speed=%X\n", | |
1004 | hw->sleep_ctrl, speed); | |
1005 | device_set_wakeup_enable(&pdev->dev, true); | |
1006 | *wol_en = true; | |
1007 | } | |
1008 | ||
1009 | pci_disable_device(pdev); | |
1010 | ||
1011 | return 0; | |
1012 | } | |
1013 | ||
1014 | static void alx_shutdown(struct pci_dev *pdev) | |
1015 | { | |
1016 | int err; | |
1017 | bool wol_en; | |
1018 | ||
1019 | err = __alx_shutdown(pdev, &wol_en); | |
1020 | if (!err) { | |
1021 | pci_wake_from_d3(pdev, wol_en); | |
1022 | pci_set_power_state(pdev, PCI_D3hot); | |
1023 | } else { | |
1024 | dev_err(&pdev->dev, "shutdown fail %d\n", err); | |
1025 | } | |
1026 | } | |
1027 | ||
1028 | static void alx_link_check(struct work_struct *work) | |
1029 | { | |
1030 | struct alx_priv *alx; | |
1031 | ||
1032 | alx = container_of(work, struct alx_priv, link_check_wk); | |
1033 | ||
1034 | rtnl_lock(); | |
1035 | alx_check_link(alx); | |
1036 | rtnl_unlock(); | |
1037 | } | |
1038 | ||
1039 | static void alx_reset(struct work_struct *work) | |
1040 | { | |
1041 | struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk); | |
1042 | ||
1043 | rtnl_lock(); | |
1044 | alx_reinit(alx); | |
1045 | rtnl_unlock(); | |
1046 | } | |
1047 | ||
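| /* Note: program partial checksum offload into the first TPD. The | |
|  * checksum start and result offsets are in units of 16-bit words | |
|  * (hence the >> 1, and -EINVAL for an odd start offset). | |
|  */ | |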
1048 | static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first) | |
1049 | { | |
1050 | u8 cso, css; | |
1051 | ||
1052 | if (skb->ip_summed != CHECKSUM_PARTIAL) | |
1053 | return 0; | |
1054 | ||
1055 | cso = skb_checksum_start_offset(skb); | |
1056 | if (cso & 1) | |
1057 | return -EINVAL; | |
1058 | ||
1059 | css = cso + skb->csum_offset; | |
1060 | first->word1 |= cpu_to_le32((cso >> 1) << TPD_CXSUMSTART_SHIFT); | |
1061 | first->word1 |= cpu_to_le32((css >> 1) << TPD_CXSUMOFFSET_SHIFT); | |
1062 | first->word1 |= cpu_to_le32(1 << TPD_CXSUM_EN_SHIFT); | |
1063 | ||
1064 | return 0; | |
1065 | } | |
1066 | ||
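| /* Note: map the skb head and then each page fragment into consecutive | |
|  * TPDs, marking the last one EOP and attaching the skb to it; on a DMA | |
|  * mapping failure all descriptors written so far are unwound through | |
|  * alx_free_txbuf(). | |
|  */ | |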
1067 | static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb) | |
1068 | { | |
1069 | struct alx_tx_queue *txq = &alx->txq; | |
1070 | struct alx_txd *tpd, *first_tpd; | |
1071 | dma_addr_t dma; | |
1072 | int maplen, f, first_idx = txq->write_idx; | |
1073 | ||
1074 | first_tpd = &txq->tpd[txq->write_idx]; | |
1075 | tpd = first_tpd; | |
1076 | ||
1077 | maplen = skb_headlen(skb); | |
1078 | dma = dma_map_single(&alx->hw.pdev->dev, skb->data, maplen, | |
1079 | DMA_TO_DEVICE); | |
1080 | if (dma_mapping_error(&alx->hw.pdev->dev, dma)) | |
1081 | goto err_dma; | |
1082 | ||
1083 | dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen); | |
1084 | dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma); | |
1085 | ||
1086 | tpd->adrl.addr = cpu_to_le64(dma); | |
1087 | tpd->len = cpu_to_le16(maplen); | |
1088 | ||
1089 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { | |
1090 | struct skb_frag_struct *frag; | |
1091 | ||
1092 | frag = &skb_shinfo(skb)->frags[f]; | |
1093 | ||
1094 | if (++txq->write_idx == alx->tx_ringsz) | |
1095 | txq->write_idx = 0; | |
1096 | tpd = &txq->tpd[txq->write_idx]; | |
1097 | ||
1098 | tpd->word1 = first_tpd->word1; | |
1099 | ||
1100 | maplen = skb_frag_size(frag); | |
1101 | dma = skb_frag_dma_map(&alx->hw.pdev->dev, frag, 0, | |
1102 | maplen, DMA_TO_DEVICE); | |
1103 | if (dma_mapping_error(&alx->hw.pdev->dev, dma)) | |
1104 | goto err_dma; | |
1105 | dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen); | |
1106 | dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma); | |
1107 | ||
1108 | tpd->adrl.addr = cpu_to_le64(dma); | |
1109 | tpd->len = cpu_to_le16(maplen); | |
1110 | } | |
1111 | ||
1112 | /* last TPD, set EOP flag and store skb */ | |
1113 | tpd->word1 |= cpu_to_le32(1 << TPD_EOP_SHIFT); | |
1114 | txq->bufs[txq->write_idx].skb = skb; | |
1115 | ||
1116 | if (++txq->write_idx == alx->tx_ringsz) | |
1117 | txq->write_idx = 0; | |
1118 | ||
1119 | return 0; | |
1120 | ||
1121 | err_dma: | |
1122 | f = first_idx; | |
1123 | while (f != txq->write_idx) { | |
1124 | alx_free_txbuf(alx, f); | |
1125 | if (++f == alx->tx_ringsz) | |
1126 | f = 0; | |
1127 | } | |
1128 | return -ENOMEM; | |
1129 | } | |
1130 | ||
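| /* Note: the queue is stopped when fewer TPDs are free than this skb | |
|  * needs (in this version the packet is also dropped rather than | |
|  * requeued), and stopped pre-emptively once less than an eighth of the | |
|  * ring remains; alx_clean_tx_irq() wakes it again. | |
|  */ | |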
1131 | static netdev_tx_t alx_start_xmit(struct sk_buff *skb, | |
1132 | struct net_device *netdev) | |
1133 | { | |
1134 | struct alx_priv *alx = netdev_priv(netdev); | |
1135 | struct alx_tx_queue *txq = &alx->txq; | |
1136 | struct alx_txd *first; | |
1137 | int tpdreq = skb_shinfo(skb)->nr_frags + 1; | |
1138 | ||
1139 | if (alx_tpd_avail(alx) < tpdreq) { | |
1140 | netif_stop_queue(alx->dev); | |
1141 | goto drop; | |
1142 | } | |
1143 | ||
1144 | first = &txq->tpd[txq->write_idx]; | |
1145 | memset(first, 0, sizeof(*first)); | |
1146 | ||
1147 | if (alx_tx_csum(skb, first)) | |
1148 | goto drop; | |
1149 | ||
1150 | if (alx_map_tx_skb(alx, skb) < 0) | |
1151 | goto drop; | |
1152 | ||
1153 | netdev_sent_queue(alx->dev, skb->len); | |
1154 | ||
1155 | /* flush updates before updating hardware */ | |
1156 | wmb(); | |
1157 | alx_write_mem16(&alx->hw, ALX_TPD_PRI0_PIDX, txq->write_idx); | |
1158 | ||
1159 | if (alx_tpd_avail(alx) < alx->tx_ringsz/8) | |
1160 | netif_stop_queue(alx->dev); | |
1161 | ||
1162 | return NETDEV_TX_OK; | |
1163 | ||
1164 | drop: | |
1165 | dev_kfree_skb(skb); | |
1166 | return NETDEV_TX_OK; | |
1167 | } | |
1168 | ||
1169 | static void alx_tx_timeout(struct net_device *dev) | |
1170 | { | |
1171 | struct alx_priv *alx = netdev_priv(dev); | |
1172 | ||
1173 | alx_schedule_reset(alx); | |
1174 | } | |
1175 | ||
1176 | static int alx_mdio_read(struct net_device *netdev, | |
1177 | int prtad, int devad, u16 addr) | |
1178 | { | |
1179 | struct alx_priv *alx = netdev_priv(netdev); | |
1180 | struct alx_hw *hw = &alx->hw; | |
1181 | u16 val; | |
1182 | int err; | |
1183 | ||
1184 | if (prtad != hw->mdio.prtad) | |
1185 | return -EINVAL; | |
1186 | ||
1187 | if (devad == MDIO_DEVAD_NONE) | |
1188 | err = alx_read_phy_reg(hw, addr, &val); | |
1189 | else | |
1190 | err = alx_read_phy_ext(hw, devad, addr, &val); | |
1191 | ||
1192 | if (err) | |
1193 | return err; | |
1194 | return val; | |
1195 | } | |
1196 | ||
1197 | static int alx_mdio_write(struct net_device *netdev, | |
1198 | int prtad, int devad, u16 addr, u16 val) | |
1199 | { | |
1200 | struct alx_priv *alx = netdev_priv(netdev); | |
1201 | struct alx_hw *hw = &alx->hw; | |
1202 | ||
1203 | if (prtad != hw->mdio.prtad) | |
1204 | return -EINVAL; | |
1205 | ||
1206 | if (devad == MDIO_DEVAD_NONE) | |
1207 | return alx_write_phy_reg(hw, addr, val); | |
1208 | ||
1209 | return alx_write_phy_ext(hw, devad, addr, val); | |
1210 | } | |
1211 | ||
1212 | static int alx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |
1213 | { | |
1214 | struct alx_priv *alx = netdev_priv(netdev); | |
1215 | ||
1216 | if (!netif_running(netdev)) | |
1217 | return -EAGAIN; | |
1218 | ||
1219 | return mdio_mii_ioctl(&alx->hw.mdio, if_mii(ifr), cmd); | |
1220 | } | |
1221 | ||
1222 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
1223 | static void alx_poll_controller(struct net_device *netdev) | |
1224 | { | |
1225 | struct alx_priv *alx = netdev_priv(netdev); | |
1226 | ||
1227 | if (alx->msi) | |
1228 | alx_intr_msi(0, alx); | |
1229 | else | |
1230 | alx_intr_legacy(0, alx); | |
1231 | } | |
1232 | #endif | |
1233 | ||
1234 | static const struct net_device_ops alx_netdev_ops = { | |
1235 | .ndo_open = alx_open, | |
1236 | .ndo_stop = alx_stop, | |
1237 | .ndo_start_xmit = alx_start_xmit, | |
1238 | .ndo_set_rx_mode = alx_set_rx_mode, | |
1239 | .ndo_validate_addr = eth_validate_addr, | |
1240 | .ndo_set_mac_address = alx_set_mac_address, | |
1241 | .ndo_change_mtu = alx_change_mtu, | |
1242 | .ndo_do_ioctl = alx_ioctl, | |
1243 | .ndo_tx_timeout = alx_tx_timeout, | |
1244 | .ndo_fix_features = alx_fix_features, | |
1245 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
1246 | .ndo_poll_controller = alx_poll_controller, | |
1247 | #endif | |
1248 | }; | |
1249 | ||
1250 | static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |
1251 | { | |
1252 | struct net_device *netdev; | |
1253 | struct alx_priv *alx; | |
1254 | struct alx_hw *hw; | |
1255 | bool phy_configured; | |
1256 | int bars, pm_cap, err; | |
1257 | ||
1258 | err = pci_enable_device_mem(pdev); | |
1259 | if (err) | |
1260 | return err; | |
1261 | ||
1262 | /* The alx chip can DMA to 64-bit addresses, but it uses a single | |
1263 | * shared register for the high 32 bits, so only a single, aligned, | |
1264 | * 4 GB physical address range can be used for descriptors. | |
1265 | */ | |
1266 | if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && | |
1267 | !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { | |
1268 | dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n"); | |
1269 | } else { | |
1270 | err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); | |
1271 | if (err) { | |
1272 | err = dma_set_coherent_mask(&pdev->dev, | |
1273 | DMA_BIT_MASK(32)); | |
1274 | if (err) { | |
1275 | dev_err(&pdev->dev, | |
1276 | "No usable DMA config, aborting\n"); | |
1277 | goto out_pci_disable; | |
1278 | } | |
1279 | } | |
1280 | } | |
1281 | ||
1282 | bars = pci_select_bars(pdev, IORESOURCE_MEM); | |
1283 | err = pci_request_selected_regions(pdev, bars, alx_drv_name); | |
1284 | if (err) { | |
1285 | dev_err(&pdev->dev, | |
1286 | "pci_request_selected_regions failed(bars:%d)\n", bars); | |
1287 | goto out_pci_disable; | |
1288 | } | |
1289 | ||
1290 | pci_enable_pcie_error_reporting(pdev); | |
1291 | pci_set_master(pdev); | |
1292 | ||
1293 | pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); | |
1294 | if (pm_cap == 0) { | |
1295 | dev_err(&pdev->dev, | |
1296 | "Can't find power management capability, aborting\n"); | |
1297 | err = -EIO; | |
1298 | goto out_pci_release; | |
1299 | } | |
1300 | ||
1301 | err = pci_set_power_state(pdev, PCI_D0); | |
1302 | if (err) | |
1303 | goto out_pci_release; | |
1304 | ||
1305 | netdev = alloc_etherdev(sizeof(*alx)); | |
1306 | if (!netdev) { | |
1307 | err = -ENOMEM; | |
1308 | goto out_pci_release; | |
1309 | } | |
1310 | ||
1311 | SET_NETDEV_DEV(netdev, &pdev->dev); | |
1312 | alx = netdev_priv(netdev); | |
1313 | spin_lock_init(&alx->hw.mdio_lock); | |
1314 | spin_lock_init(&alx->irq_lock); | |
1315 | alx->dev = netdev; |
1316 | alx->hw.pdev = pdev; | |
1317 | alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP | | |
1318 | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR | NETIF_MSG_WOL; | |
1319 | hw = &alx->hw; | |
1320 | pci_set_drvdata(pdev, alx); | |
1321 | ||
1322 | hw->hw_addr = pci_ioremap_bar(pdev, 0); | |
1323 | if (!hw->hw_addr) { | |
1324 | dev_err(&pdev->dev, "cannot map device registers\n"); | |
1325 | err = -EIO; | |
1326 | goto out_free_netdev; | |
1327 | } | |
1328 | ||
1329 | netdev->netdev_ops = &alx_netdev_ops; | |
1330 | SET_ETHTOOL_OPS(netdev, &alx_ethtool_ops); | |
1331 | netdev->irq = pdev->irq; | |
1332 | netdev->watchdog_timeo = ALX_WATCHDOG_TIME; | |
1333 | ||
1334 | if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG) | |
1335 | pdev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG; | |
1336 | ||
1337 | err = alx_init_sw(alx); | |
1338 | if (err) { | |
1339 | dev_err(&pdev->dev, "net device private data init failed\n"); | |
1340 | goto out_unmap; | |
1341 | } | |
1342 | ||
1343 | alx_reset_pcie(hw); | |
1344 | ||
1345 | phy_configured = alx_phy_configured(hw); | |
1346 | ||
1347 | if (!phy_configured) | |
1348 | alx_reset_phy(hw); | |
1349 | ||
1350 | err = alx_reset_mac(hw); | |
1351 | if (err) { | |
1352 | dev_err(&pdev->dev, "MAC Reset failed, error = %d\n", err); | |
1353 | goto out_unmap; | |
1354 | } | |
1355 | ||
1356 | /* setup link to put it in a known good starting state */ | |
1357 | if (!phy_configured) { | |
1358 | err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl); | |
1359 | if (err) { | |
1360 | dev_err(&pdev->dev, | |
1361 | "failed to configure PHY speed/duplex (err=%d)\n", | |
1362 | err); | |
1363 | goto out_unmap; | |
1364 | } | |
1365 | } | |
1366 | ||
1367 | netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; | |
1368 | ||
1369 | if (alx_get_perm_macaddr(hw, hw->perm_addr)) { | |
1370 | dev_warn(&pdev->dev, | |
1371 | "Invalid permanent address programmed, using random one\n"); | |
1372 | eth_hw_addr_random(netdev); | |
1373 | memcpy(hw->perm_addr, netdev->dev_addr, netdev->addr_len); | |
1374 | } | |
1375 | ||
1376 | memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN); | |
1377 | memcpy(netdev->dev_addr, hw->mac_addr, ETH_ALEN); | |
1378 | memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN); | |
1379 | ||
1380 | hw->mdio.prtad = 0; | |
1381 | hw->mdio.mmds = 0; | |
1382 | hw->mdio.dev = netdev; | |
1383 | hw->mdio.mode_support = MDIO_SUPPORTS_C45 | | |
1384 | MDIO_SUPPORTS_C22 | | |
1385 | MDIO_EMULATE_C22; | |
1386 | hw->mdio.mdio_read = alx_mdio_read; | |
1387 | hw->mdio.mdio_write = alx_mdio_write; | |
1388 | ||
1389 | if (!alx_get_phy_info(hw)) { | |
1390 | dev_err(&pdev->dev, "failed to identify PHY\n"); | |
1391 | err = -EIO; | |
1392 | goto out_unmap; | |
1393 | } | |
1394 | ||
1395 | INIT_WORK(&alx->link_check_wk, alx_link_check); | |
1396 | INIT_WORK(&alx->reset_wk, alx_reset); | |
1397 | netif_carrier_off(netdev); |
1398 | ||
1399 | err = register_netdev(netdev); | |
1400 | if (err) { | |
1401 | dev_err(&pdev->dev, "register netdevice failed\n"); | |
1402 | goto out_unmap; | |
1403 | } | |
1404 | ||
1405 | device_set_wakeup_enable(&pdev->dev, hw->sleep_ctrl); | |
1406 | ||
1407 | netdev_info(netdev, | |
1408 | "Qualcomm Atheros AR816x/AR817x Ethernet [%pM]\n", | |
1409 | netdev->dev_addr); | |
1410 | ||
1411 | return 0; | |
1412 | ||
1413 | out_unmap: | |
1414 | iounmap(hw->hw_addr); | |
1415 | out_free_netdev: | |
1416 | free_netdev(netdev); | |
1417 | out_pci_release: | |
1418 | pci_release_selected_regions(pdev, bars); | |
1419 | out_pci_disable: | |
1420 | pci_disable_device(pdev); | |
1421 | return err; | |
1422 | } | |
1423 | ||
1424 | static void alx_remove(struct pci_dev *pdev) | |
1425 | { | |
1426 | struct alx_priv *alx = pci_get_drvdata(pdev); | |
1427 | struct alx_hw *hw = &alx->hw; | |
1428 | ||
1429 | cancel_work_sync(&alx->link_check_wk); | |
1430 | cancel_work_sync(&alx->reset_wk); | |
1431 | ||
1432 | /* restore permanent mac address */ | |
1433 | alx_set_macaddr(hw, hw->perm_addr); | |
1434 | ||
1435 | unregister_netdev(alx->dev); | |
1436 | iounmap(hw->hw_addr); | |
1437 | pci_release_selected_regions(pdev, | |
1438 | pci_select_bars(pdev, IORESOURCE_MEM)); | |
1439 | ||
1440 | pci_disable_pcie_error_reporting(pdev); | |
1441 | pci_disable_device(pdev); | |
1442 | pci_set_drvdata(pdev, NULL); | |
1443 | ||
1444 | free_netdev(alx->dev); | |
1445 | } | |
1446 | ||
1447 | #ifdef CONFIG_PM_SLEEP | |
1448 | static int alx_suspend(struct device *dev) | |
1449 | { | |
1450 | struct pci_dev *pdev = to_pci_dev(dev); | |
1451 | int err; | |
1452 | bool wol_en; | |
1453 | ||
1454 | err = __alx_shutdown(pdev, &wol_en); | |
1455 | if (err) { | |
1456 | dev_err(&pdev->dev, "shutdown fail in suspend %d\n", err); | |
1457 | return err; | |
1458 | } | |
1459 | ||
1460 | if (wol_en) { | |
1461 | pci_prepare_to_sleep(pdev); | |
1462 | } else { | |
1463 | pci_wake_from_d3(pdev, false); | |
1464 | pci_set_power_state(pdev, PCI_D3hot); | |
1465 | } | |
1466 | ||
1467 | return 0; | |
1468 | } | |
1469 | ||
1470 | static int alx_resume(struct device *dev) | |
1471 | { | |
1472 | struct pci_dev *pdev = to_pci_dev(dev); | |
1473 | struct alx_priv *alx = pci_get_drvdata(pdev); | |
1474 | struct net_device *netdev = alx->dev; | |
1475 | struct alx_hw *hw = &alx->hw; | |
1476 | int err; | |
1477 | ||
1478 | pci_set_power_state(pdev, PCI_D0); | |
1479 | pci_restore_state(pdev); | |
1480 | pci_save_state(pdev); | |
1481 | ||
1482 | pci_enable_wake(pdev, PCI_D3hot, 0); | |
1483 | pci_enable_wake(pdev, PCI_D3cold, 0); | |
1484 | ||
1485 | hw->link_speed = SPEED_UNKNOWN; | |
1486 | alx->int_mask = ALX_ISR_MISC; | |
1487 | ||
1488 | alx_reset_pcie(hw); | |
1489 | alx_reset_phy(hw); | |
1490 | ||
1491 | err = alx_reset_mac(hw); | |
1492 | if (err) { | |
1493 | netif_err(alx, hw, alx->dev, | |
1494 | "resume:reset_mac fail %d\n", err); | |
1495 | return -EIO; | |
1496 | } | |
1497 | ||
1498 | err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl); | |
1499 | if (err) { | |
1500 | netif_err(alx, hw, alx->dev, | |
1501 | "resume:setup_speed_duplex fail %d\n", err); | |
1502 | return -EIO; | |
1503 | } | |
1504 | ||
1505 | if (netif_running(netdev)) { | |
1506 | err = __alx_open(alx, true); | |
1507 | if (err) | |
1508 | return err; | |
1509 | } | |
1510 | ||
1511 | netif_device_attach(netdev); | |
1512 | ||
1513 | return err; | |
1514 | } | |
1515 | #endif | |
1516 | ||
1517 | static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev, | |
1518 | pci_channel_state_t state) | |
1519 | { | |
1520 | struct alx_priv *alx = pci_get_drvdata(pdev); | |
1521 | struct net_device *netdev = alx->dev; | |
1522 | pci_ers_result_t rc = PCI_ERS_RESULT_NEED_RESET; | |
1523 | ||
1524 | dev_info(&pdev->dev, "pci error detected\n"); | |
1525 | ||
1526 | rtnl_lock(); | |
1527 | ||
1528 | if (netif_running(netdev)) { | |
1529 | netif_device_detach(netdev); | |
1530 | alx_halt(alx); | |
1531 | } | |
1532 | ||
1533 | if (state == pci_channel_io_perm_failure) | |
1534 | rc = PCI_ERS_RESULT_DISCONNECT; | |
1535 | else | |
1536 | pci_disable_device(pdev); | |
1537 | ||
1538 | rtnl_unlock(); | |
1539 | ||
1540 | return rc; | |
1541 | } | |
1542 | ||
1543 | static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev) | |
1544 | { | |
1545 | struct alx_priv *alx = pci_get_drvdata(pdev); | |
1546 | struct alx_hw *hw = &alx->hw; | |
1547 | pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; | |
1548 | ||
1549 | dev_info(&pdev->dev, "pci error slot reset\n"); | |
1550 | ||
1551 | rtnl_lock(); | |
1552 | ||
1553 | if (pci_enable_device(pdev)) { | |
1554 | dev_err(&pdev->dev, "Failed to re-enable PCI device after reset\n"); | |
1555 | goto out; | |
1556 | } | |
1557 | ||
1558 | pci_set_master(pdev); | |
1559 | pci_enable_wake(pdev, PCI_D3hot, 0); | |
1560 | pci_enable_wake(pdev, PCI_D3cold, 0); | |
1561 | ||
1562 | alx_reset_pcie(hw); | |
1563 | if (!alx_reset_mac(hw)) | |
1564 | rc = PCI_ERS_RESULT_RECOVERED; | |
1565 | out: | |
1566 | pci_cleanup_aer_uncorrect_error_status(pdev); | |
1567 | ||
1568 | rtnl_unlock(); | |
1569 | ||
1570 | return rc; | |
1571 | } | |
1572 | ||
1573 | static void alx_pci_error_resume(struct pci_dev *pdev) | |
1574 | { | |
1575 | struct alx_priv *alx = pci_get_drvdata(pdev); | |
1576 | struct net_device *netdev = alx->dev; | |
1577 | ||
1578 | dev_info(&pdev->dev, "pci error resume\n"); | |
1579 | ||
1580 | rtnl_lock(); | |
1581 | ||
1582 | if (netif_running(netdev)) { | |
1583 | alx_activate(alx); | |
1584 | netif_device_attach(netdev); | |
1585 | } | |
1586 | ||
1587 | rtnl_unlock(); | |
1588 | } | |
1589 | ||
1590 | static const struct pci_error_handlers alx_err_handlers = { | |
1591 | .error_detected = alx_pci_error_detected, | |
1592 | .slot_reset = alx_pci_error_slot_reset, | |
1593 | .resume = alx_pci_error_resume, | |
1594 | }; | |
1595 | ||
1596 | #ifdef CONFIG_PM_SLEEP | |
1597 | static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume); | |
1598 | #define ALX_PM_OPS (&alx_pm_ops) | |
1599 | #else | |
1600 | #define ALX_PM_OPS NULL | |
1601 | #endif | |
1602 | ||
1603 | static DEFINE_PCI_DEVICE_TABLE(alx_pci_tbl) = { | |
1604 | { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161), | |
1605 | .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, | |
1606 | { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200), | |
1607 | .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, | |
1608 | { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162), | |
1609 | .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, | |
1610 | { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) }, | |
1611 | { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8172) }, | |
1612 | {} | |
1613 | }; | |
1614 | ||
1615 | static struct pci_driver alx_driver = { | |
1616 | .name = alx_drv_name, | |
1617 | .id_table = alx_pci_tbl, | |
1618 | .probe = alx_probe, | |
1619 | .remove = alx_remove, | |
1620 | .shutdown = alx_shutdown, | |
1621 | .err_handler = &alx_err_handlers, | |
1622 | .driver.pm = ALX_PM_OPS, | |
1623 | }; | |
1624 | ||
1625 | module_pci_driver(alx_driver); | |
1626 | MODULE_DEVICE_TABLE(pci, alx_pci_tbl); | |
1627 | MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>"); | |
1628 | MODULE_AUTHOR("Qualcomm Corporation, <nic-devel@qualcomm.com>"); | |
1629 | MODULE_DESCRIPTION( | |
1630 | "Qualcomm Atheros(R) AR816x/AR817x PCI-E Ethernet Network Driver"); | |
1631 | MODULE_LICENSE("GPL"); |