/*******************************************************************************
  This contains the functions to handle the enhanced descriptors.

  Copyright (C) 2007-2014 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/stmmac.h>
#include "common.h"
#include "descs_com.h"

static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
				  struct dma_desc *p, void __iomem *ioaddr)
{
	struct net_device_stats *stats = (struct net_device_stats *)data;
	unsigned int tdes0 = le32_to_cpu(p->des0);
	int ret = tx_done;

	/* Get tx owner first */
	if (unlikely(tdes0 & ETDES0_OWN))
		return tx_dma_own;

	/* Verify tx error by looking at the last segment. */
	if (likely(!(tdes0 & ETDES0_LAST_SEGMENT)))
		return tx_not_ls;

	if (unlikely(tdes0 & ETDES0_ERROR_SUMMARY)) {
		if (unlikely(tdes0 & ETDES0_JABBER_TIMEOUT))
			x->tx_jabber++;

		if (unlikely(tdes0 & ETDES0_FRAME_FLUSHED)) {
			x->tx_frame_flushed++;
			dwmac_dma_flush_tx_fifo(ioaddr);
		}

		if (unlikely(tdes0 & ETDES0_LOSS_CARRIER)) {
			x->tx_losscarrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(tdes0 & ETDES0_NO_CARRIER)) {
			x->tx_carrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely((tdes0 & ETDES0_LATE_COLLISION) ||
			     (tdes0 & ETDES0_EXCESSIVE_COLLISIONS)))
			stats->collisions +=
				(tdes0 & ETDES0_COLLISION_COUNT_MASK) >> 3;

		if (unlikely(tdes0 & ETDES0_EXCESSIVE_DEFERRAL))
			x->tx_deferred++;

		if (unlikely(tdes0 & ETDES0_UNDERFLOW_ERROR)) {
			dwmac_dma_flush_tx_fifo(ioaddr);
			x->tx_underflow++;
		}

		if (unlikely(tdes0 & ETDES0_IP_HEADER_ERROR))
			x->tx_ip_header_error++;

		if (unlikely(tdes0 & ETDES0_PAYLOAD_ERROR)) {
			x->tx_payload_error++;
			dwmac_dma_flush_tx_fifo(ioaddr);
		}

		ret = tx_err;
	}

	if (unlikely(tdes0 & ETDES0_DEFERRED))
		x->tx_deferred++;

#ifdef STMMAC_VLAN_TAG_USED
	if (tdes0 & ETDES0_VLAN_FRAME)
		x->tx_vlan++;
#endif

	return ret;
}
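
/* Note: the expected consumer of these codes is the driver's TX reclaim
 * path (stmmac_tx_clean()): tx_dma_own stops the cleanup loop, tx_not_ls
 * skips the per-frame accounting, tx_err bumps the error counters, and
 * tx_done marks a successfully transmitted frame.
 */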

static int enh_desc_get_tx_len(struct dma_desc *p)
{
	return (le32_to_cpu(p->des1) & ETDES1_BUFFER1_SIZE_MASK);
}

static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
{
	int ret = good_frame;
	u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;

	/* bits 5 7 0 | Frame status
	 * ----------------------------------------------------------
	 * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
	 * 1 0 0 | IPv4/6 No CSUM errors.
	 * 1 0 1 | IPv4/6 CSUM PAYLOAD error
	 * 1 1 0 | IPv4/6 CSUM IP HR error
	 * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errors
	 * 0 0 1 | IPv4/6 unsupported IP PAYLOAD
	 * 0 1 1 | COE bypassed.. no IPv4/6 frame
	 * 0 1 0 | Reserved.
	 */
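	/* Worked example: an IPv4 frame whose payload checksum failed is
	 * flagged with type = 1, ipc_err = 0, payload_err = 1, i.e.
	 * status = 0x5; it is therefore passed up as csum_none and the
	 * network stack verifies the checksum in software.
	 */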
	if (status == 0x0)
		ret = llc_snap;
	else if (status == 0x4)
		ret = good_frame;
	else if (status == 0x5)
		ret = csum_none;
	else if (status == 0x6)
		ret = csum_none;
	else if (status == 0x7)
		ret = csum_none;
	else if (status == 0x1)
		ret = discard_frame;
	else if (status == 0x3)
		ret = discard_frame;
	return ret;
}

static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
				    struct dma_extended_desc *p)
{
	unsigned int rdes0 = le32_to_cpu(p->basic.des0);
	unsigned int rdes4 = le32_to_cpu(p->des4);

	if (unlikely(rdes0 & ERDES0_RX_MAC_ADDR)) {
		int message_type = (rdes4 & ERDES4_MSG_TYPE_MASK) >> 8;

		if (rdes4 & ERDES4_IP_HDR_ERR)
			x->ip_hdr_err++;
		if (rdes4 & ERDES4_IP_PAYLOAD_ERR)
			x->ip_payload_err++;
		if (rdes4 & ERDES4_IP_CSUM_BYPASSED)
			x->ip_csum_bypassed++;
		if (rdes4 & ERDES4_IPV4_PKT_RCVD)
			x->ipv4_pkt_rcvd++;
		if (rdes4 & ERDES4_IPV6_PKT_RCVD)
			x->ipv6_pkt_rcvd++;

		if (message_type == RDES_EXT_NO_PTP)
			x->no_ptp_rx_msg_type_ext++;
		else if (message_type == RDES_EXT_SYNC)
			x->ptp_rx_msg_type_sync++;
		else if (message_type == RDES_EXT_FOLLOW_UP)
			x->ptp_rx_msg_type_follow_up++;
		else if (message_type == RDES_EXT_DELAY_REQ)
			x->ptp_rx_msg_type_delay_req++;
		else if (message_type == RDES_EXT_DELAY_RESP)
			x->ptp_rx_msg_type_delay_resp++;
		else if (message_type == RDES_EXT_PDELAY_REQ)
			x->ptp_rx_msg_type_pdelay_req++;
		else if (message_type == RDES_EXT_PDELAY_RESP)
			x->ptp_rx_msg_type_pdelay_resp++;
		else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
			x->ptp_rx_msg_type_pdelay_follow_up++;
		else if (message_type == RDES_PTP_ANNOUNCE)
			x->ptp_rx_msg_type_announce++;
		else if (message_type == RDES_PTP_MANAGEMENT)
			x->ptp_rx_msg_type_management++;
		else if (message_type == RDES_PTP_PKT_RESERVED_TYPE)
			x->ptp_rx_msg_pkt_reserved_type++;

		if (rdes4 & ERDES4_PTP_FRAME_TYPE)
			x->ptp_frame_type++;
		if (rdes4 & ERDES4_PTP_VER)
			x->ptp_ver++;
		if (rdes4 & ERDES4_TIMESTAMP_DROPPED)
			x->timestamp_dropped++;
		if (rdes4 & ERDES4_AV_PKT_RCVD)
			x->av_pkt_rcvd++;
		if (rdes4 & ERDES4_AV_TAGGED_PKT_RCVD)
			x->av_tagged_pkt_rcvd++;
		if ((rdes4 & ERDES4_VLAN_TAG_PRI_VAL_MASK) >> 18)
			x->vlan_tag_priority_val++;
		if (rdes4 & ERDES4_L3_FILTER_MATCH)
			x->l3_filter_match++;
		if (rdes4 & ERDES4_L4_FILTER_MATCH)
			x->l4_filter_match++;
		if ((rdes4 & ERDES4_L3_L4_FILT_NO_MATCH_MASK) >> 26)
			x->l3_l4_filter_no_match++;
	}
}

static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
				  struct dma_desc *p)
{
	struct net_device_stats *stats = (struct net_device_stats *)data;
	unsigned int rdes0 = le32_to_cpu(p->des0);
	int ret = good_frame;

	if (unlikely(rdes0 & RDES0_OWN))
		return dma_own;

	if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
		if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
			x->rx_desc++;
			stats->rx_length_errors++;
		}
		if (unlikely(rdes0 & RDES0_OVERFLOW_ERROR))
			x->rx_gmac_overflow++;

		if (unlikely(rdes0 & RDES0_IPC_CSUM_ERROR))
			pr_err("\tIPC Csum Error/Giant frame\n");

		if (unlikely(rdes0 & RDES0_COLLISION))
			stats->collisions++;
		if (unlikely(rdes0 & RDES0_RECEIVE_WATCHDOG))
			x->rx_watchdog++;

		if (unlikely(rdes0 & RDES0_MII_ERROR))	/* GMII */
			x->rx_mii++;

		if (unlikely(rdes0 & RDES0_CRC_ERROR)) {
			x->rx_crc_errors++;
			stats->rx_crc_errors++;
		}
		ret = discard_frame;
	}

	/* After a payload csum error, the ES bit is set.
	 * This does not match the information reported in the databook.
	 * At any rate, we need to understand whether the HW CSUM computation
	 * is OK and report this info to the upper layers.
	 */
	ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
				 !!(rdes0 & RDES0_FRAME_TYPE),
				 !!(rdes0 & ERDES0_RX_MAC_ADDR));

	if (unlikely(rdes0 & RDES0_DRIBBLING))
		x->dribbling_bit++;

	if (unlikely(rdes0 & RDES0_SA_FILTER_FAIL)) {
		x->sa_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(rdes0 & RDES0_DA_FILTER_FAIL)) {
		x->da_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(rdes0 & RDES0_LENGTH_ERROR)) {
		x->rx_length++;
		ret = discard_frame;
	}
#ifdef STMMAC_VLAN_TAG_USED
	if (rdes0 & RDES0_VLAN_TAG)
		x->rx_vlan++;
#endif

	return ret;
}

static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
				  int mode, int end)
{
	p->des0 |= cpu_to_le32(RDES0_OWN);
	p->des1 |= cpu_to_le32((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK);

	if (mode == STMMAC_CHAIN_MODE)
		ehn_desc_rx_set_on_chain(p);
	else
		ehn_desc_rx_set_on_ring(p, end);

	if (disable_rx_ic)
		p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
}
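
/* Note on the two modes (per the descs_com.h helpers used above): in
 * STMMAC_CHAIN_MODE every descriptor is linked to the address of the next
 * one, whereas in ring mode the descriptors are laid out contiguously and
 * the "end" flag marks the last entry so the DMA wraps back to the base
 * of the ring.
 */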

static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
{
	p->des0 &= cpu_to_le32(~ETDES0_OWN);
	if (mode == STMMAC_CHAIN_MODE)
		enh_desc_end_tx_desc_on_chain(p);
	else
		enh_desc_end_tx_desc_on_ring(p, end);
}

static int enh_desc_get_tx_owner(struct dma_desc *p)
{
	return (le32_to_cpu(p->des0) & ETDES0_OWN) >> 31;
}

static void enh_desc_set_tx_owner(struct dma_desc *p)
{
	p->des0 |= cpu_to_le32(ETDES0_OWN);
}

static void enh_desc_set_rx_owner(struct dma_desc *p)
{
	p->des0 |= cpu_to_le32(RDES0_OWN);
}

static int enh_desc_get_tx_ls(struct dma_desc *p)
{
	return (le32_to_cpu(p->des0) & ETDES0_LAST_SEGMENT) >> 29;
}

static void enh_desc_release_tx_desc(struct dma_desc *p, int mode)
{
	int ter = (le32_to_cpu(p->des0) & ETDES0_END_RING) >> 21;

	memset(p, 0, offsetof(struct dma_desc, des2));
	if (mode == STMMAC_CHAIN_MODE)
		enh_desc_end_tx_desc_on_chain(p);
	else
		enh_desc_end_tx_desc_on_ring(p, ter);
}

static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
				     bool csum_flag, int mode, bool tx_own,
				     bool ls, unsigned int tot_pkt_len)
{
	unsigned int tdes0 = le32_to_cpu(p->des0);

	if (mode == STMMAC_CHAIN_MODE)
		enh_set_tx_desc_len_on_chain(p, len);
	else
		enh_set_tx_desc_len_on_ring(p, len);

	if (is_fs)
		tdes0 |= ETDES0_FIRST_SEGMENT;
	else
		tdes0 &= ~ETDES0_FIRST_SEGMENT;

	if (likely(csum_flag))
		tdes0 |= (TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT);
	else
		tdes0 &= ~(TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT);

	if (ls)
		tdes0 |= ETDES0_LAST_SEGMENT;

	/* Finally set the OWN bit. Later the DMA will start! */
	if (tx_own)
		tdes0 |= ETDES0_OWN;

	if (is_fs && tx_own)
		/* When the OWN bit, for the first frame, has to be set, all
		 * descriptors for the same frame have to be set up before, to
		 * avoid a race condition.
		 */
		dma_wmb();

	p->des0 = cpu_to_le32(tdes0);
}
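
/* Ordering sketch (assuming the usual stmmac transmit flow): for a frame
 * split across several descriptors the caller prepares the non-first
 * segments first (is_fs == 0, so OWN is set without a barrier) and the
 * first segment last; the dma_wmb() above then guarantees that all other
 * descriptors of the frame are visible to the device before OWN is
 * raised on the first one.
 */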

static void enh_desc_set_tx_ic(struct dma_desc *p)
{
	p->des0 |= cpu_to_le32(ETDES0_INTERRUPT);
}

static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
{
	unsigned int csum = 0;
	/* The type-1 checksum offload engines append the checksum at
	 * the end of frame and the two bytes of checksum are added in
	 * the length.
	 * Adjust for that in the framelen for type-1 checksum offload
	 * engines.
	 */
	if (rx_coe_type == STMMAC_RX_COE_TYPE1)
		csum = 2;

	return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK)
		 >> RDES0_FRAME_LEN_SHIFT) - csum);
}
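
/* Example of the type-1 adjustment: a 64-byte frame received through a
 * type-1 checksum offload engine is reported in RDES0 as 66 bytes, since
 * the two appended checksum bytes are counted in the length; csum = 2
 * restores the real frame length.
 */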

static void enh_desc_enable_tx_timestamp(struct dma_desc *p)
{
	p->des0 |= cpu_to_le32(ETDES0_TIME_STAMP_ENABLE);
}

static int enh_desc_get_tx_timestamp_status(struct dma_desc *p)
{
	return (le32_to_cpu(p->des0) & ETDES0_TIME_STAMP_STATUS) >> 17;
}

static u64 enh_desc_get_timestamp(void *desc, u32 ats)
{
	u64 ns;

	if (ats) {
		struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
		ns = le32_to_cpu(p->des6);
		/* convert high/sec time stamp value to nanosecond */
		ns += le32_to_cpu(p->des7) * 1000000000ULL;
	} else {
		struct dma_desc *p = (struct dma_desc *)desc;
		ns = le32_to_cpu(p->des2);
		ns += le32_to_cpu(p->des3) * 1000000000ULL;
	}

	return ns;
}
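
/* Example: with ats set, des7 = 5 (seconds) and des6 = 1000 (nanoseconds)
 * yield 5 * 1000000000 + 1000 ns. The basic descriptor uses the same
 * seconds/nanoseconds split in des3/des2.
 */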

static int enh_desc_get_rx_timestamp_status(void *desc, void *next_desc,
					    u32 ats)
{
	if (ats) {
		struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
		return (le32_to_cpu(p->basic.des0) & RDES0_IPC_CSUM_ERROR) >> 7;
	} else {
		struct dma_desc *p = (struct dma_desc *)desc;
		if ((le32_to_cpu(p->des2) == 0xffffffff) &&
		    (le32_to_cpu(p->des3) == 0xffffffff))
			/* timestamp is corrupted, hence don't store it */
			return 0;
		else
			return 1;
	}
}

static void enh_desc_display_ring(void *head, unsigned int size, bool rx)
{
	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
	int i;

	pr_info("Extended %s descriptor ring:\n", rx ? "RX" : "TX");

	for (i = 0; i < size; i++) {
		u64 x;

		x = *(u64 *)ep;
		pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
			i, (unsigned int)virt_to_phys(ep),
			(unsigned int)x, (unsigned int)(x >> 32),
			ep->basic.des2, ep->basic.des3);
		ep++;
	}
	pr_info("\n");
}

const struct stmmac_desc_ops enh_desc_ops = {
	.tx_status = enh_desc_get_tx_status,
	.rx_status = enh_desc_get_rx_status,
	.get_tx_len = enh_desc_get_tx_len,
	.init_rx_desc = enh_desc_init_rx_desc,
	.init_tx_desc = enh_desc_init_tx_desc,
	.get_tx_owner = enh_desc_get_tx_owner,
	.release_tx_desc = enh_desc_release_tx_desc,
	.prepare_tx_desc = enh_desc_prepare_tx_desc,
	.set_tx_ic = enh_desc_set_tx_ic,
	.get_tx_ls = enh_desc_get_tx_ls,
	.set_tx_owner = enh_desc_set_tx_owner,
	.set_rx_owner = enh_desc_set_rx_owner,
	.get_rx_frame_len = enh_desc_get_rx_frame_len,
	.rx_extended_status = enh_desc_get_ext_status,
	.enable_tx_timestamp = enh_desc_enable_tx_timestamp,
	.get_tx_timestamp_status = enh_desc_get_tx_timestamp_status,
	.get_timestamp = enh_desc_get_timestamp,
	.get_rx_timestamp_status = enh_desc_get_rx_timestamp_status,
	.display_ring = enh_desc_display_ring,
};