/*
 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "common.h"
#include "regs.h"

/*
 * # of exact address filters.  The first one is used for the station address,
 * the rest are available for multicast addresses.
 */
#define EXACT_ADDR_FILTERS 8

static inline int macidx(const struct cmac *mac)
{
        return mac->offset / (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR);
}

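/*
 * Reset the XAUI SERDES lanes for this MAC: assert every reset and
 * power-down bit along with the VPD-provided XAUI configuration, then
 * release them in stages with a short settling delay between steps.
 */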
static void xaui_serdes_reset(struct cmac *mac)
{
        static const unsigned int clear[] = {
                F_PWRDN0 | F_PWRDN1, F_RESETPLL01, F_RESET0 | F_RESET1,
                F_PWRDN2 | F_PWRDN3, F_RESETPLL23, F_RESET2 | F_RESET3
        };

        int i;
        struct adapter *adap = mac->adapter;
        u32 ctrl = A_XGM_SERDES_CTRL0 + mac->offset;

        t3_write_reg(adap, ctrl, adap->params.vpd.xauicfg[macidx(mac)] |
                     F_RESET3 | F_RESET2 | F_RESET1 | F_RESET0 |
                     F_PWRDN3 | F_PWRDN2 | F_PWRDN1 | F_PWRDN0 |
                     F_RESETPLL23 | F_RESETPLL01);
        t3_read_reg(adap, ctrl);
        udelay(15);

        for (i = 0; i < ARRAY_SIZE(clear); i++) {
                t3_set_reg_field(adap, ctrl, clear[i], 0);
                udelay(15);
        }
}

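/*
 * Pulse the PCS reset bit for this MAC: clear F_PCS_RESET_ in the reset
 * control register, wait briefly, then set it again.
 */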
void t3b_pcs_reset(struct cmac *mac)
{
        t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
                         F_PCS_RESET_, 0);
        udelay(20);
        t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, 0,
                         F_PCS_RESET_);
}

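/*
 * Full reset of a MAC: put the MAC in reset, reprogram its basic Rx/Tx
 * configuration, reset the attached SERDES/PCS blocks as appropriate for
 * the port type, and clear the software statistics.
 */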
int t3_mac_reset(struct cmac *mac)
{
        static const struct addr_val_pair mac_reset_avp[] = {
                {A_XGM_TX_CTRL, 0},
                {A_XGM_RX_CTRL, 0},
                {A_XGM_RX_CFG, F_DISPAUSEFRAMES | F_EN1536BFRAMES |
                 F_RMFCS | F_ENJUMBO | F_ENHASHMCAST},
                {A_XGM_RX_HASH_LOW, 0},
                {A_XGM_RX_HASH_HIGH, 0},
                {A_XGM_RX_EXACT_MATCH_LOW_1, 0},
                {A_XGM_RX_EXACT_MATCH_LOW_2, 0},
                {A_XGM_RX_EXACT_MATCH_LOW_3, 0},
                {A_XGM_RX_EXACT_MATCH_LOW_4, 0},
                {A_XGM_RX_EXACT_MATCH_LOW_5, 0},
                {A_XGM_RX_EXACT_MATCH_LOW_6, 0},
                {A_XGM_RX_EXACT_MATCH_LOW_7, 0},
                {A_XGM_RX_EXACT_MATCH_LOW_8, 0},
                {A_XGM_STAT_CTRL, F_CLRSTATS}
        };
        u32 val;
        struct adapter *adap = mac->adapter;
        unsigned int oft = mac->offset;

        t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
        t3_read_reg(adap, A_XGM_RESET_CTRL + oft);      /* flush */

        t3_write_regs(adap, mac_reset_avp, ARRAY_SIZE(mac_reset_avp), oft);
        t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + oft,
                         F_RXSTRFRWRD | F_DISERRFRAMES,
                         uses_xaui(adap) ? 0 : F_RXSTRFRWRD);
        t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + oft, 0, F_UNDERUNFIX);

        if (uses_xaui(adap)) {
                if (adap->params.rev == 0) {
                        t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
                                         F_RXENABLE | F_TXENABLE);
                        if (t3_wait_op_done(adap, A_XGM_SERDES_STATUS1 + oft,
                                            F_CMULOCK, 1, 5, 2)) {
                                CH_ERR(adap,
                                       "MAC %d XAUI SERDES CMU lock failed\n",
                                       macidx(mac));
                                return -1;
                        }
                        t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
                                         F_SERDESRESET_);
                } else
                        xaui_serdes_reset(mac);
        }

        t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + oft,
                         V_RXMAXFRAMERSIZE(M_RXMAXFRAMERSIZE),
                         V_RXMAXFRAMERSIZE(MAX_FRAME_SIZE) | F_RXENFRAMER);
        val = F_MAC_RESET_ | F_XGMAC_STOP_EN;

        if (is_10G(adap))
                val |= F_PCS_RESET_;
        else if (uses_xaui(adap))
                val |= F_PCS_RESET_ | F_XG2G_RESET_;
        else
                val |= F_RGMII_RESET_ | F_XG2G_RESET_;
        t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
        t3_read_reg(adap, A_XGM_RESET_CTRL + oft);      /* flush */
        if ((val & F_PCS_RESET_) && adap->params.rev) {
                msleep(1);
                t3b_pcs_reset(mac);
        }

        memset(&mac->stats, 0, sizeof(mac->stats));
        return 0;
}

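/*
 * T3B2-specific MAC reset.  The port is quiesced (MPS port deactivated,
 * NIC traffic and Tx pause stopped), the Rx FIFO is allowed to drain, and
 * the MAC and PCS are then pulled through a full reset before the Rx
 * configuration, TP drop configuration and MPS state are restored.
 */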
static int t3b2_mac_reset(struct cmac *mac)
{
        struct adapter *adap = mac->adapter;
        unsigned int oft = mac->offset, store;
        int idx = macidx(mac);
        u32 val;

        if (!macidx(mac))
                t3_set_reg_field(adap, A_MPS_CFG, F_PORT0ACTIVE, 0);
        else
                t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE, 0);

        /* Stop NIC traffic to reduce the number of TXTOGGLES */
        t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 0);
        /* Ensure TX drains */
        t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN, 0);

        t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
        t3_read_reg(adap, A_XGM_RESET_CTRL + oft);      /* flush */

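        /*
         * The TP_TX_DROP registers are reached indirectly: write the target
         * register address to A_TP_PIO_ADDR, then access its contents
         * through A_TP_PIO_DATA.
         */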
        /* Store A_TP_TX_DROP_CFG_CH0 */
        t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
        store = t3_read_reg(adap, A_TP_PIO_DATA);

        msleep(10);

        /* Change DROP_CFG to 0xc0000011 */
        t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
        t3_write_reg(adap, A_TP_PIO_DATA, 0xc0000011);

        /* Check for XGM Rx FIFO empty */
        /* Loop count increased to 1000 from 5 to cover the 1G and 100Mbps cases */
        if (t3_wait_op_done(adap, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT + oft,
                            0x80000000, 1, 1000, 2)) {
                CH_ERR(adap, "MAC %d Rx fifo drain failed\n",
                       macidx(mac));
                return -1;
        }

        t3_write_reg(adap, A_XGM_RESET_CTRL + oft, 0);
        t3_read_reg(adap, A_XGM_RESET_CTRL + oft);      /* flush */

        val = F_MAC_RESET_;
        if (is_10G(adap))
                val |= F_PCS_RESET_;
        else if (uses_xaui(adap))
                val |= F_PCS_RESET_ | F_XG2G_RESET_;
        else
                val |= F_RGMII_RESET_ | F_XG2G_RESET_;
        t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
        t3_read_reg(adap, A_XGM_RESET_CTRL + oft);      /* flush */
        if ((val & F_PCS_RESET_) && adap->params.rev) {
                msleep(1);
                t3b_pcs_reset(mac);
        }
        t3_write_reg(adap, A_XGM_RX_CFG + oft,
                     F_DISPAUSEFRAMES | F_EN1536BFRAMES |
                     F_RMFCS | F_ENJUMBO | F_ENHASHMCAST);

        /* Restore the DROP_CFG */
        t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
        t3_write_reg(adap, A_TP_PIO_DATA, store);

        if (!idx)
                t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT0ACTIVE);
        else
                t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT1ACTIVE);

        /* Re-enable NIC traffic */
        t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 1);

        return 0;
}

/*
 * Set the exact match register 'idx' to recognize the given Ethernet address.
 */
static void set_addr_filter(struct cmac *mac, int idx, const u8 *addr)
{
        u32 addr_lo, addr_hi;
        unsigned int oft = mac->offset + idx * 8;

        addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
        addr_hi = (addr[5] << 8) | addr[4];

        t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1 + oft, addr_lo);
        t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_HIGH_1 + oft, addr_hi);
}

/* Set one of the station's unicast MAC addresses. */
int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6])
{
        if (idx >= mac->nucast)
                return -EINVAL;
        set_addr_filter(mac, idx, addr);
        return 0;
}

/*
 * Specify the number of exact address filters that should be reserved for
 * unicast addresses.  Caller should reload the unicast and multicast addresses
 * after calling this.
 */
int t3_mac_set_num_ucast(struct cmac *mac, int n)
{
        if (n > EXACT_ADDR_FILTERS)
                return -EINVAL;
        mac->nucast = n;
        return 0;
}

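/*
 * Toggle the exact-match filters without losing their programmed addresses:
 * each register is read and written back with its own value.  Rewriting the
 * LOW half of an entry takes it out of service until the corresponding HIGH
 * half is written again, so the two helpers below disable and re-enable the
 * filters respectively.
 */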
void t3_mac_disable_exact_filters(struct cmac *mac)
{
        unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_LOW_1;

        for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) {
                u32 v = t3_read_reg(mac->adapter, reg);
                t3_write_reg(mac->adapter, reg, v);
        }
        t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1);  /* flush */
}

void t3_mac_enable_exact_filters(struct cmac *mac)
{
        unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_HIGH_1;

        for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) {
                u32 v = t3_read_reg(mac->adapter, reg);
                t3_write_reg(mac->adapter, reg, v);
        }
        t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1);  /* flush */
}

/* Calculate the RX hash filter index of an Ethernet address */
static int hash_hw_addr(const u8 *addr)
{
        int hash = 0, octet, bit, i = 0, c;

        for (octet = 0; octet < 6; ++octet)
                for (c = addr[octet], bit = 0; bit < 8; c >>= 1, ++bit) {
                        hash ^= (c & 1) << i;
                        if (++i == 6)
                                i = 0;
                }
        return hash;
}

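/*
 * Program the Rx mode for a port: copy-all-frames for promiscuous mode,
 * exact-match filters for the first multicast addresses that fit after the
 * reserved unicast entries, and the 64-bit multicast hash (indexed by the
 * 6-bit XOR hash above, split across the HASH_LOW/HASH_HIGH registers) for
 * the rest.
 */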
int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm)
{
        u32 val, hash_lo, hash_hi;
        struct adapter *adap = mac->adapter;
        unsigned int oft = mac->offset;

        val = t3_read_reg(adap, A_XGM_RX_CFG + oft) & ~F_COPYALLFRAMES;
        if (rm->dev->flags & IFF_PROMISC)
                val |= F_COPYALLFRAMES;
        t3_write_reg(adap, A_XGM_RX_CFG + oft, val);

        if (rm->dev->flags & IFF_ALLMULTI)
                hash_lo = hash_hi = 0xffffffff;
        else {
                u8 *addr;
                int exact_addr_idx = mac->nucast;

                hash_lo = hash_hi = 0;
                while ((addr = t3_get_next_mcaddr(rm)))
                        if (exact_addr_idx < EXACT_ADDR_FILTERS)
                                set_addr_filter(mac, exact_addr_idx++, addr);
                        else {
                                int hash = hash_hw_addr(addr);

                                if (hash < 32)
                                        hash_lo |= (1 << hash);
                                else
                                        hash_hi |= (1 << (hash - 32));
                        }
        }

        t3_write_reg(adap, A_XGM_RX_HASH_LOW + oft, hash_lo);
        t3_write_reg(adap, A_XGM_RX_HASH_HIGH + oft, hash_hi);
        return 0;
}

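/*
 * PAUSE high watermark for the Rx FIFO: leave room for three maximum-size
 * frames, but never drop below 38% of the FIFO and always keep at least
 * 8KB of headroom.
 */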
static int rx_fifo_hwm(int mtu)
{
        int hwm;

        hwm = max(MAC_RXFIFO_SIZE - 3 * mtu, (MAC_RXFIFO_SIZE * 38) / 100);
        return min(hwm, MAC_RXFIFO_SIZE - 8192);
}

int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
{
        int hwm, lwm, divisor;
        int ipg;
        unsigned int thres, v, reg;
        struct adapter *adap = mac->adapter;

        /*
         * MAX_FRAME_SIZE includes header + FCS, mtu doesn't.  The HW max
         * packet size register includes header, but not FCS.
         */
        mtu += 14;
        if (mtu > MAX_FRAME_SIZE - 4)
                return -EINVAL;
        t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);

        if (adap->params.rev >= T3_REV_B2 &&
            (t3_read_reg(adap, A_XGM_RX_CTRL + mac->offset) & F_RXEN)) {
                t3_mac_disable_exact_filters(mac);
                v = t3_read_reg(adap, A_XGM_RX_CFG + mac->offset);
                t3_set_reg_field(adap, A_XGM_RX_CFG + mac->offset,
                                 F_ENHASHMCAST | F_COPYALLFRAMES, F_DISBCAST);

                reg = adap->params.rev == T3_REV_B2 ?
                        A_XGM_RX_MAX_PKT_SIZE_ERR_CNT : A_XGM_RXFIFO_CFG;

                /* drain RX FIFO */
                if (t3_wait_op_done(adap, reg + mac->offset,
                                    F_RXFIFO_EMPTY, 1, 20, 5)) {
                        t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v);
                        t3_mac_enable_exact_filters(mac);
                        return -EIO;
                }
                t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset,
                                 V_RXMAXPKTSIZE(M_RXMAXPKTSIZE),
                                 V_RXMAXPKTSIZE(mtu));
                t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v);
                t3_mac_enable_exact_filters(mac);
        } else
                t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset,
                                 V_RXMAXPKTSIZE(M_RXMAXPKTSIZE),
                                 V_RXMAXPKTSIZE(mtu));

        /*
         * Adjust the PAUSE frame watermarks.  We always set the LWM, and the
         * HWM only if flow-control is enabled.
         */
        hwm = rx_fifo_hwm(mtu);
        lwm = min(3 * (int)mtu, MAC_RXFIFO_SIZE / 4);
        v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset);
        v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM);
        v |= V_RXFIFOPAUSELWM(lwm / 8);
        if (G_RXFIFOPAUSEHWM(v))
                v = (v & ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM)) |
                    V_RXFIFOPAUSEHWM(hwm / 8);

        t3_write_reg(adap, A_XGM_RXFIFO_CFG + mac->offset, v);

        /* Adjust the TX FIFO threshold based on the MTU */
        thres = (adap->params.vpd.cclk * 1000) / 15625;
        thres = (thres * mtu) / 1000;
        if (is_10G(adap))
                thres /= 10;
        thres = mtu > thres ? (mtu - thres + 7) / 8 : 0;
        thres = max(thres, 8U); /* need at least 8 */
        ipg = (adap->params.rev == T3_REV_C) ? 0 : 1;
        t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset,
                         V_TXFIFOTHRESH(M_TXFIFOTHRESH) | V_TXIPG(M_TXIPG),
                         V_TXFIFOTHRESH(thres) | V_TXIPG(ipg));

        if (adap->params.rev > 0) {
                divisor = (adap->params.rev == T3_REV_C) ? 64 : 8;
                t3_write_reg(adap, A_XGM_PAUSE_TIMER + mac->offset,
                             (hwm - lwm) * 4 / divisor);
        }
        t3_write_reg(adap, A_XGM_TX_PAUSE_QUANTA + mac->offset,
                     MAC_RXFIFO_SIZE * 4 * 8 / 512);
        return 0;
}

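/*
 * Set the port speed (10/100/1000/10000, if requested), duplex (only full
 * duplex is supported) and pause-frame handling.  PAUSE_TX controls whether
 * the Rx FIFO high watermark is armed so the MAC generates pause frames;
 * PAUSE_RX controls whether received pause frames throttle the transmitter.
 */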
int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc)
{
        u32 val;
        struct adapter *adap = mac->adapter;
        unsigned int oft = mac->offset;

        if (duplex >= 0 && duplex != DUPLEX_FULL)
                return -EINVAL;
        if (speed >= 0) {
                if (speed == SPEED_10)
                        val = V_PORTSPEED(0);
                else if (speed == SPEED_100)
                        val = V_PORTSPEED(1);
                else if (speed == SPEED_1000)
                        val = V_PORTSPEED(2);
                else if (speed == SPEED_10000)
                        val = V_PORTSPEED(3);
                else
                        return -EINVAL;

                t3_set_reg_field(adap, A_XGM_PORT_CFG + oft,
                                 V_PORTSPEED(M_PORTSPEED), val);
        }

        val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft);
        val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
        if (fc & PAUSE_TX) {
                u32 rx_max_pkt_size =
                        G_RXMAXPKTSIZE(t3_read_reg(adap,
                                                   A_XGM_RX_MAX_PKT_SIZE + oft));
                val |= V_RXFIFOPAUSEHWM(rx_fifo_hwm(rx_max_pkt_size) / 8);
        }
        t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val);

        t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN,
                         (fc & PAUSE_RX) ? F_TXPAUSEEN : 0);
        return 0;
}

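/*
 * Enable the MAC Tx and/or Rx paths.  When enabling Tx, the TP drop
 * configuration for the channel is programmed and the frame, drop and
 * SOP/EOP counters are snapshotted so t3b2_mac_watchdog_task() can later
 * detect a transmitter that has stopped making progress.
 */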
int t3_mac_enable(struct cmac *mac, int which)
{
        int idx = macidx(mac);
        struct adapter *adap = mac->adapter;
        unsigned int oft = mac->offset;
        struct mac_stats *s = &mac->stats;

        if (which & MAC_DIRECTION_TX) {
                t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
                t3_write_reg(adap, A_TP_PIO_DATA,
                             adap->params.rev == T3_REV_C ?
                             0xc4ffff01 : 0xc0ede401);
                t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
                t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx,
                                 adap->params.rev == T3_REV_C ? 0 : 1 << idx);

                t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);

                t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CNT_CH0 + idx);
                mac->tx_mcnt = s->tx_frames;
                mac->tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
                                                        A_TP_PIO_DATA)));
                mac->tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
                                                A_XGM_TX_SPI4_SOP_EOP_CNT +
                                                oft)));
                mac->rx_mcnt = s->rx_frames;
                mac->rx_pause = s->rx_pause;
                mac->rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
                                                A_XGM_RX_SPI4_SOP_EOP_CNT +
                                                oft)));
                mac->rx_ocnt = s->rx_fifo_ovfl;
                mac->txen = F_TXEN;
                mac->toggle_cnt = 0;
        }
        if (which & MAC_DIRECTION_RX)
                t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN);
        return 0;
}

int t3_mac_disable(struct cmac *mac, int which)
{
        struct adapter *adap = mac->adapter;

        if (which & MAC_DIRECTION_TX) {
                t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
                mac->txen = 0;
        }
        if (which & MAC_DIRECTION_RX) {
                int val = F_MAC_RESET_;

                t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
                                 F_PCS_RESET_, 0);
                msleep(100);
                t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0);
                if (is_10G(adap))
                        val |= F_PCS_RESET_;
                else if (uses_xaui(adap))
                        val |= F_PCS_RESET_ | F_XG2G_RESET_;
                else
                        val |= F_RGMII_RESET_ | F_XG2G_RESET_;
                t3_write_reg(mac->adapter, A_XGM_RESET_CTRL + mac->offset, val);
        }
        return 0;
}

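/*
 * Periodic watchdog for the T3B2 Tx MAC.  Returns 0 if the transmitter is
 * making progress, 1 if Tx was toggled off and on to recover a stall, and
 * 2 if the stall persisted and a full t3b2_mac_reset() was performed.
 */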
int t3b2_mac_watchdog_task(struct cmac *mac)
{
        struct adapter *adap = mac->adapter;
        struct mac_stats *s = &mac->stats;
        unsigned int tx_tcnt, tx_xcnt;
        u64 tx_mcnt = s->tx_frames;
        int status;

        status = 0;
        tx_xcnt = 1;            /* By default tx_xcnt is making progress */
        tx_tcnt = mac->tx_tcnt; /* If tx_mcnt is progressing ignore tx_tcnt */
        if (tx_mcnt == mac->tx_mcnt && mac->rx_pause == s->rx_pause) {
                tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
                                                A_XGM_TX_SPI4_SOP_EOP_CNT +
                                                mac->offset)));
                if (tx_xcnt == 0) {
                        t3_write_reg(adap, A_TP_PIO_ADDR,
                                     A_TP_TX_DROP_CNT_CH0 + macidx(mac));
                        tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
                                                        A_TP_PIO_DATA)));
                } else {
                        goto out;
                }
        } else {
                mac->toggle_cnt = 0;
                goto out;
        }

        if ((tx_tcnt != mac->tx_tcnt) && (mac->tx_xcnt == 0)) {
                if (mac->toggle_cnt > 4) {
                        status = 2;
                        goto out;
                } else {
                        status = 1;
                        goto out;
                }
        } else {
                mac->toggle_cnt = 0;
                goto out;
        }

out:
        mac->tx_tcnt = tx_tcnt;
        mac->tx_xcnt = tx_xcnt;
        mac->tx_mcnt = s->tx_frames;
        mac->rx_pause = s->rx_pause;
        if (status == 1) {
                t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
                t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset);  /* flush */
                t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, mac->txen);
                t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset);  /* flush */
                mac->toggle_cnt++;
        } else if (status == 2) {
                t3b2_mac_reset(mac);
                mac->toggle_cnt = 0;
        }
        return status;
}

/*
 * This function is called periodically to accumulate the current values of the
 * RMON counters into the port statistics.  Since the packet counters are only
 * 32 bits they can overflow in ~286 secs at 10G, so the function should be
 * called more frequently than that.  The byte counters are 45 bits wide and
 * would overflow in ~7.8 hours.
 */
const struct mac_stats *t3_mac_update_stats(struct cmac *mac)
{
#define RMON_READ(mac, addr) t3_read_reg(mac->adapter, addr + mac->offset)
#define RMON_UPDATE(mac, name, reg) \
        (mac)->stats.name += (u64)RMON_READ(mac, A_XGM_STAT_##reg)
#define RMON_UPDATE64(mac, name, reg_lo, reg_hi) \
        (mac)->stats.name += RMON_READ(mac, A_XGM_STAT_##reg_lo) + \
                             ((u64)RMON_READ(mac, A_XGM_STAT_##reg_hi) << 32)

        u32 v, lo;

        RMON_UPDATE64(mac, rx_octets, RX_BYTES_LOW, RX_BYTES_HIGH);
        RMON_UPDATE64(mac, rx_frames, RX_FRAMES_LOW, RX_FRAMES_HIGH);
        RMON_UPDATE(mac, rx_mcast_frames, RX_MCAST_FRAMES);
        RMON_UPDATE(mac, rx_bcast_frames, RX_BCAST_FRAMES);
        RMON_UPDATE(mac, rx_fcs_errs, RX_CRC_ERR_FRAMES);
        RMON_UPDATE(mac, rx_pause, RX_PAUSE_FRAMES);
        RMON_UPDATE(mac, rx_jabber, RX_JABBER_FRAMES);
        RMON_UPDATE(mac, rx_short, RX_SHORT_FRAMES);
        RMON_UPDATE(mac, rx_symbol_errs, RX_SYM_CODE_ERR_FRAMES);

        RMON_UPDATE(mac, rx_too_long, RX_OVERSIZE_FRAMES);

        v = RMON_READ(mac, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT);
        if (mac->adapter->params.rev == T3_REV_B2)
                v &= 0x7fffffff;
        mac->stats.rx_too_long += v;

        RMON_UPDATE(mac, rx_frames_64, RX_64B_FRAMES);
        RMON_UPDATE(mac, rx_frames_65_127, RX_65_127B_FRAMES);
        RMON_UPDATE(mac, rx_frames_128_255, RX_128_255B_FRAMES);
        RMON_UPDATE(mac, rx_frames_256_511, RX_256_511B_FRAMES);
        RMON_UPDATE(mac, rx_frames_512_1023, RX_512_1023B_FRAMES);
        RMON_UPDATE(mac, rx_frames_1024_1518, RX_1024_1518B_FRAMES);
        RMON_UPDATE(mac, rx_frames_1519_max, RX_1519_MAXB_FRAMES);

        RMON_UPDATE64(mac, tx_octets, TX_BYTE_LOW, TX_BYTE_HIGH);
        RMON_UPDATE64(mac, tx_frames, TX_FRAME_LOW, TX_FRAME_HIGH);
        RMON_UPDATE(mac, tx_mcast_frames, TX_MCAST);
        RMON_UPDATE(mac, tx_bcast_frames, TX_BCAST);
        RMON_UPDATE(mac, tx_pause, TX_PAUSE);
        /* This counts error frames in general (bad FCS, underrun, etc). */
        RMON_UPDATE(mac, tx_underrun, TX_ERR_FRAMES);

        RMON_UPDATE(mac, tx_frames_64, TX_64B_FRAMES);
        RMON_UPDATE(mac, tx_frames_65_127, TX_65_127B_FRAMES);
        RMON_UPDATE(mac, tx_frames_128_255, TX_128_255B_FRAMES);
        RMON_UPDATE(mac, tx_frames_256_511, TX_256_511B_FRAMES);
        RMON_UPDATE(mac, tx_frames_512_1023, TX_512_1023B_FRAMES);
        RMON_UPDATE(mac, tx_frames_1024_1518, TX_1024_1518B_FRAMES);
        RMON_UPDATE(mac, tx_frames_1519_max, TX_1519_MAXB_FRAMES);

        /*
         * rx_cong_drops comes from a TP MIB counter that isn't clear-on-read,
         * so only accumulate the change since the last update.
         */
        t3_write_reg(mac->adapter, A_TP_MIB_INDEX, mac->offset ? 51 : 50);
        v = t3_read_reg(mac->adapter, A_TP_MIB_RDATA);
        lo = (u32) mac->stats.rx_cong_drops;
        mac->stats.rx_cong_drops += (u64) (v - lo);

        return &mac->stats;
}