source: G950FXXS5DSI1
[GitHub/exynos8895/android_kernel_samsung_universal8895.git] drivers/net/wireless/bcmdhd4361/dhd_msgbuf.c
1 /**
2 * @file definition of host message ring functionality
3 * Provides type definitions and function prototypes used to link the
4 * DHD OS, bus, and protocol modules.
5 *
6 * Copyright (C) 1999-2019, Broadcom.
7 *
8 * Unless you and Broadcom execute a separate written software license
9 * agreement governing use of this software, this software is licensed to you
10 * under the terms of the GNU General Public License version 2 (the "GPL"),
11 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
12 * following added to such license:
13 *
14 * As a special exception, the copyright holders of this software give you
15 * permission to link this software with independent modules, and to copy and
16 * distribute the resulting executable under terms of your choice, provided that
17 * you also meet, for each linked independent module, the terms and conditions of
18 * the license of that module. An independent module is a module which is not
19 * derived from this software. The special exception does not apply to any
20 * modifications of the software.
21 *
22 * Notwithstanding the above, under no circumstances may you combine this
23 * software in any way with any other Broadcom software provided under a license
24 * other than the GPL, without Broadcom's express prior written consent.
25 *
26 *
27 * <<Broadcom-WL-IPTag/Open:>>
28 *
29 * $Id: dhd_msgbuf.c 818370 2019-05-07 11:39:47Z $
30 */
31
32 #include <typedefs.h>
33 #include <osl.h>
34
35 #include <bcmutils.h>
36 #include <bcmmsgbuf.h>
37 #include <bcmendian.h>
38
39 #include <dngl_stats.h>
40 #include <dhd.h>
41 #include <dhd_proto.h>
42
43 #include <dhd_bus.h>
44
45 #include <dhd_dbg.h>
46 #include <siutils.h>
47 #include <dhd_debug.h>
48
49 #include <dhd_flowring.h>
50
51 #include <pcie_core.h>
52 #include <bcmpcie.h>
53 #include <dhd_pcie.h>
54
55 #if defined(DHD_LB)
56 #include <linux/cpu.h>
57 #include <bcm_ring.h>
58 #define DHD_LB_WORKQ_SZ (8192)
59 #define DHD_LB_WORKQ_SYNC (16)
60 #define DHD_LB_WORK_SCHED (DHD_LB_WORKQ_SYNC * 2)
61 #endif /* DHD_LB */
62
63 #include <etd.h>
64 #include <hnd_debug.h>
65 #include <bcmtlv.h>
66 #include <hnd_armtrap.h>
67 #include <dnglevent.h>
68
69 #ifdef DHD_PKT_LOGGING
70 #include <dhd_pktlog.h>
71 #endif /* DHD_PKT_LOGGING */
72
73 extern char dhd_version[];
74 extern char fw_version[];
75
76 /**
77 * Host configures a soft doorbell for d2h rings, by specifying a 32bit host
78 * address where a value must be written. Host may also request interrupt
79 * coalescing on this soft doorbell.
80 * Use Case: Hosts with network processors, may register with the dongle the
81 * network processor's thread wakeup register and a value corresponding to the
82 * core/thread context. Dongle will issue a write transaction <address,value>
83 * to the PCIE RC which will need to be routed to the mapped register space by
84 * the host.
85 */
86 /* #define DHD_D2H_SOFT_DOORBELL_SUPPORT */
87
88 /* Dependency Check */
89 #if defined(IOCTLRESP_USE_CONSTMEM) && defined(DHD_USE_STATIC_CTRLBUF)
90 #error "DHD_USE_STATIC_CTRLBUF is NOT working with DHD_USE_OSLPKT_FOR_RESPBUF"
91 #endif /* IOCTLRESP_USE_CONSTMEM && DHD_USE_STATIC_CTRLBUF */
92
93 #define RETRIES 2 /* # of retries to retrieve matching ioctl response */
94
95 #define DEFAULT_RX_BUFFERS_TO_POST 256
96 #define RXBUFPOST_THRESHOLD 32
97 #define RX_BUF_BURST 32 /* Rx buffers for MSDU Data */
98
99 #define DHD_STOP_QUEUE_THRESHOLD 200
100 #define DHD_START_QUEUE_THRESHOLD 100
101
102 #define RX_DMA_OFFSET 8 /* Mem2mem DMA inserts an extra 8 bytes */
103 #define IOCT_RETBUF_SIZE (RX_DMA_OFFSET + WLC_IOCTL_MAXLEN)
104
105 /* flags for ioctl pending status */
106 #define MSGBUF_IOCTL_ACK_PENDING (1<<0)
107 #define MSGBUF_IOCTL_RESP_PENDING (1<<1)
108
109 #define DHD_IOCTL_REQ_PKTBUFSZ 2048
110 #define MSGBUF_IOCTL_MAX_RQSTLEN (DHD_IOCTL_REQ_PKTBUFSZ - H2DRING_CTRL_SUB_ITEMSIZE)
111
112 #define DMA_ALIGN_LEN 4
113
114 #define DMA_D2H_SCRATCH_BUF_LEN 8
115 #define DMA_XFER_LEN_LIMIT 0x400000
116
117 #ifdef BCM_HOST_BUF
118 #ifndef DMA_HOST_BUFFER_LEN
119 #define DMA_HOST_BUFFER_LEN 0x200000
120 #endif // endif
121 #endif /* BCM_HOST_BUF */
122
123 #define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ 8192
124
125 #define DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D 1
126 #define DHD_FLOWRING_MAX_EVENTBUF_POST 32
127 #define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST 8
128 #define DHD_H2D_INFORING_MAX_BUF_POST 32
129 #define DHD_MAX_TSBUF_POST 8
130
131 #define DHD_PROT_FUNCS 43
132
133 /* Length of buffer in host for bus throughput measurement */
134 #define DHD_BUS_TPUT_BUF_LEN 2048
135
136 #define TXP_FLUSH_NITEMS
137
138 /* optimization to write "n" tx items at a time to ring */
139 #define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT 48
140
141 #define RING_NAME_MAX_LENGTH 24
142 #define CTRLSUB_HOSTTS_MEESAGE_SIZE 1024
143 /* Giving room before ioctl_trans_id rolls over. */
144 #define BUFFER_BEFORE_ROLLOVER 300
145
146 /* 512K memory + 32K registers */
147 #define SNAPSHOT_UPLOAD_BUF_SIZE ((512 + 32) * 1024)
148
149 struct msgbuf_ring; /* ring context for common and flow rings */
150
151 /**
152 * PCIE D2H DMA Complete Sync Modes
153 *
154 * Firmware may interrupt the host before the D2H Mem2Mem DMA has completed
155 * into host system memory. A WAR using one of the following approaches is needed:
156 * 1. Seqnum: Dongle places a modulo-253 seqnum in the last word of each D2H message.
157 * 2. XOR Checksum: Dongle computes an XOR checksum over each work item and
158 * writes it in the last word; each work item also carries an epoch#
159 * (seqnum % 253) in its common message header.
160 *
161 * 3. Read Barrier: Dongle does a host memory read access prior to posting an
162 * interrupt, ensuring that D2H data transfer indeed completed.
163 * 4. Dongle DMA's all indices after producing items in the D2H ring, flushing
164 * ring contents before the indices.
165 *
166 * Host does not sync for DMA to complete with option #3 or #4, and a noop sync
167 * callback (see dhd_prot_d2h_sync_none) may be bound.
168 *
169 * Dongle advertises host side sync mechanism requirements.
170 */
171
172 #define PCIE_D2H_SYNC_WAIT_TRIES (512U)
173 #define PCIE_D2H_SYNC_NUM_OF_STEPS (5U)
174 #define PCIE_D2H_SYNC_DELAY (100UL) /* in terms of usecs */
175
176 /**
177 * Custom callback attached based upon the D2H DMA Sync mode advertised by the dongle.
178 *
179 * On success: return cmn_msg_hdr_t::msg_type
180 * On failure: return 0 (invalid msg_type)
181 */
182 typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
183 volatile cmn_msg_hdr_t *msg, int msglen);
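
/*
 * Illustrative sketch (not compiled into the driver; names are hypothetical):
 * how a D2H consumer is expected to use the bound sync callback before handing
 * a work item to its message handler. A return of MSG_TYPE_INVALID (0) means
 * the DMA of this work item could not be confirmed and the item is skipped.
 */
#if 0
static void
dhd_prot_d2h_consume_one_example(dhd_pub_t *dhd, struct msgbuf_ring *ring,
	volatile cmn_msg_hdr_t *msg)
{
	uint8 msg_type;

	/* SEQNUM/XORCSUM modes wait for the DMA; the NONE mode is a noop */
	msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, ring->item_len);

	if (msg_type == MSG_TYPE_INVALID)
		return; /* livelock was logged and this message is dropped */

	/* ... dispatch msg_type to its handler ... */
}
#endif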
184
185 /*
186 * +----------------------------------------------------------------------------
187 *
188 * RingIds and FlowId are not equivalent as ringids include D2H rings whereas
189 * flowids do not.
190 *
191 * Dongle advertises the max H2D rings as max_sub_queues = 'N', which includes
192 * the H2D common rings as well as the (N-BCMPCIE_H2D_COMMON_MSGRINGS) flowrings
193 *
194 * Here is a sample mapping (based on PCIE Full Dongle Rev5) where
195 * BCMPCIE_H2D_COMMON_MSGRINGS = 2, i.e. 2 H2D common rings, and
196 * BCMPCIE_COMMON_MSGRINGS = 5, i.e. including 3 D2H common rings.
197 *
198 * H2D Control Submit RingId = 0 FlowId = 0 reserved never allocated
199 * H2D RxPost Submit RingId = 1 FlowId = 1 reserved never allocated
200 *
201 * D2H Control Complete RingId = 2
202 * D2H Transmit Complete RingId = 3
203 * D2H Receive Complete RingId = 4
204 *
205 * H2D TxPost FLOWRING RingId = 5 FlowId = 2 (1st flowring)
206 * H2D TxPost FLOWRING RingId = 6 FlowId = 3 (2nd flowring)
207 * H2D TxPost FLOWRING RingId = 5 + (N-1) FlowId = 2 + (N-1) (Nth flowring)
208 *
209 * When TxPost FlowId(s) are allocated, the FlowIds [0..FLOWID_RESERVED) are
210 * unused, where FLOWID_RESERVED is BCMPCIE_H2D_COMMON_MSGRINGS.
211 *
212 * Example: when a system supports 4 bc/mc and 128 uc flowrings, with
213 * BCMPCIE_H2D_COMMON_MSGRINGS = 2 and BCMPCIE_COMMON_MSGRINGS = 5, the
214 * FlowId values would be in the range [2..133] and the corresponding
215 * RingId values would be in the range [5..136].
216 *
217 * The flowid allocator may choose to allocate Flowids:
218 * bc/mc (per virtual interface) in one consecutive range [2..(2+VIFS))
219 * X# of uc flowids in consecutive ranges (per station Id), where X is the
220 * packet's access category (e.g. 4 uc flowids per station).
221 *
222 * CAUTION:
223 * When DMA indices array feature is used, RingId=5, corresponding to the 0th
224 * FLOWRING, will actually use the FlowId as index into the H2D DMA index,
225 * since the FlowId truly represents the index in the H2D DMA indices array.
226 *
227 * Likewise, in the D2H direction, the RingId - BCMPCIE_H2D_COMMON_MSGRINGS,
228 * will represent the index in the D2H DMA indices array.
229 *
230 * +----------------------------------------------------------------------------
231 */
232
233 /* First TxPost Flowring Id */
234 #define DHD_FLOWRING_START_FLOWID BCMPCIE_H2D_COMMON_MSGRINGS
235
236 /* Determine whether a ringid belongs to a TxPost flowring */
237 #define DHD_IS_FLOWRING(ringid, max_flow_rings) \
238 ((ringid) >= BCMPCIE_COMMON_MSGRINGS && \
239 (ringid) < ((max_flow_rings) + BCMPCIE_COMMON_MSGRINGS))
240
241 /* Convert a H2D TxPost FlowId to a MsgBuf RingId */
242 #define DHD_FLOWID_TO_RINGID(flowid) \
243 (BCMPCIE_COMMON_MSGRINGS + ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS))
244
245 /* Convert a MsgBuf RingId to a H2D TxPost FlowId */
246 #define DHD_RINGID_TO_FLOWID(ringid) \
247 (BCMPCIE_H2D_COMMON_MSGRINGS + ((ringid) - BCMPCIE_COMMON_MSGRINGS))
248
249 /* Convert a H2D MsgBuf RingId to an offset index into the H2D DMA indices array
250 * This may be used for the H2D DMA WR index array or H2D DMA RD index array or
251 * any array of H2D rings.
252 */
253 #define DHD_H2D_RING_OFFSET(ringid) \
254 (((ringid) >= BCMPCIE_COMMON_MSGRINGS) ? DHD_RINGID_TO_FLOWID(ringid) : (ringid))
255
256 /* Convert a H2D MsgBuf Flowring Id to an offset index into the H2D DMA indices array
257 * This may be used for IFRM.
258 */
259 #define DHD_H2D_FRM_FLOW_RING_OFFSET(ringid) \
260 ((ringid) - BCMPCIE_COMMON_MSGRINGS)
261
262 /* Convert a D2H MsgBuf RingId to an offset index into the D2H DMA indices array
263 * This may be used for the D2H DMA WR index array or D2H DMA RD index array or
264 * any array of D2H rings.
265 * d2h debug ring is located at the end, i.e. after all the tx flow rings and h2d debug ring
266 * max_h2d_rings: total number of h2d rings
267 */
268 #define DHD_D2H_RING_OFFSET(ringid, max_h2d_rings) \
269 ((ringid) > (max_h2d_rings) ? \
270 ((ringid) - max_h2d_rings) : \
271 ((ringid) - BCMPCIE_H2D_COMMON_MSGRINGS))
272
273 /* Convert a D2H DMA Indices Offset to a RingId */
274 #define DHD_D2H_RINGID(offset) \
275 ((offset) + BCMPCIE_H2D_COMMON_MSGRINGS)
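
/*
 * Worked example (illustrative only), using the sample mapping described above
 * where BCMPCIE_H2D_COMMON_MSGRINGS = 2 and BCMPCIE_COMMON_MSGRINGS = 5:
 *
 *   DHD_FLOWID_TO_RINGID(2)         = 5 + (2 - 2) = 5  (1st TxPost flowring)
 *   DHD_RINGID_TO_FLOWID(5)         = 2 + (5 - 5) = 2
 *   DHD_H2D_RING_OFFSET(1)          = 1                (H2D RxPost submit ring)
 *   DHD_H2D_RING_OFFSET(5)          = 2                (FlowId indexes the H2D DMA array)
 *   DHD_D2H_RING_OFFSET(2, max_h2d) = 2 - 2 = 0        (D2H ctrl cpl, with max_h2d > 2)
 *   DHD_D2H_RINGID(0)               = 0 + 2 = 2
 */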
276
277 #define DHD_DMAH_NULL ((void*)NULL)
278
279 /*
280 * Pad a DMA-able buffer by an additional cacheline. If the end of the DMA-able
281 * buffer does not occupy the entire cacheline, and another object is placed
282 * following the DMA-able buffer, data corruption may occur if the DMA-able
283 * buffer is DMAed into (e.g. D2H direction) and HW cache coherency
284 * is not available.
285 */
286 #if defined(L1_CACHE_BYTES)
287 #define DHD_DMA_PAD (L1_CACHE_BYTES)
288 #else
289 #define DHD_DMA_PAD (128)
290 #endif // endif
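
/*
 * Worked example (illustrative only) of how DHD_DMA_PAD is applied in
 * dhd_dma_buf_alloc() below: with DHD_DMA_PAD = 128, a request for 1544 bytes
 * (1544 % 128 != 0) is allocated as 1544 + 128 bytes, while a request for
 * 2048 bytes (already a multiple of 128) gets no extra pad.
 */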
291
292 /* Used in loopback tests */
293 typedef struct dhd_dmaxfer {
294 dhd_dma_buf_t srcmem;
295 dhd_dma_buf_t dstmem;
296 uint32 srcdelay;
297 uint32 destdelay;
298 uint32 len;
299 bool in_progress;
300 uint64 start_usec;
301 uint32 d11_lpbk;
302 int status;
303 } dhd_dmaxfer_t;
304
305 /**
306 * msgbuf_ring : This object manages the host side ring that includes a DMA-able
307 * buffer, the WR and RD indices, ring parameters such as the max number of items
308 * and the length of each item, and other miscellaneous runtime state.
309 * A msgbuf_ring may be used to represent a H2D or D2H common ring or a
310 * H2D TxPost ring as specified in the PCIE FullDongle Spec.
311 * Ring parameters are conveyed to the dongle, which maintains its own peer end
312 * ring state. Depending on whether the DMA Indices feature is supported, the
313 * host will update the WR/RD index in the DMA indices array in host memory or
314 * directly in dongle memory.
315 */
316 typedef struct msgbuf_ring {
317 bool inited;
318 uint16 idx; /* ring id */
319 uint16 rd; /* read index */
320 uint16 curr_rd; /* read index for debug */
321 uint16 wr; /* write index */
322 uint16 max_items; /* maximum number of items in ring */
323 uint16 item_len; /* length of each item in the ring */
324 sh_addr_t base_addr; /* LITTLE ENDIAN formatted: base address */
325 dhd_dma_buf_t dma_buf; /* DMA-able buffer: pa, va, len, dmah, secdma */
326 uint32 seqnum; /* next expected item's sequence number */
327 #ifdef TXP_FLUSH_NITEMS
328 void *start_addr;
329 /* # of messages on ring not yet announced to dongle */
330 uint16 pend_items_count;
331 #endif /* TXP_FLUSH_NITEMS */
332
333 uint8 ring_type;
334 uint8 n_completion_ids;
335 bool create_pending;
336 uint16 create_req_id;
337 uint8 current_phase;
338 uint16 compeltion_ring_ids[MAX_COMPLETION_RING_IDS_ASSOCIATED];
339 uchar name[RING_NAME_MAX_LENGTH];
340 uint32 ring_mem_allocated;
341 void *ring_lock;
342 } msgbuf_ring_t;
343
344 #define DHD_RING_BGN_VA(ring) ((ring)->dma_buf.va)
345 #define DHD_RING_END_VA(ring) \
346 ((uint8 *)(DHD_RING_BGN_VA((ring))) + \
347 (((ring)->max_items - 1) * (ring)->item_len))
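
/*
 * Illustrative sketch (not compiled; helper names are hypothetical): how a slot
 * address and the ring occupancy follow from the fields above. Note that
 * DHD_RING_END_VA() points at the start of the last item, not one byte past
 * the ring.
 */
#if 0
static INLINE void *
msgbuf_ring_item_va_example(msgbuf_ring_t *ring, uint16 idx)
{
	/* caller guarantees idx < ring->max_items */
	return (uint8 *)DHD_RING_BGN_VA(ring) + ((uint32)idx * ring->item_len);
}

static INLINE uint16
msgbuf_ring_occupancy_example(msgbuf_ring_t *ring)
{
	/* items produced (wr) but not yet consumed (rd), accounting for wrap */
	return (ring->wr >= ring->rd) ?
		(ring->wr - ring->rd) :
		(ring->max_items - ring->rd + ring->wr);
}
#endif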
348
349 /* This can be overwritten by module parameter defined in dhd_linux.c
350 * or by dhd iovar h2d_max_txpost.
351 */
352 int h2d_max_txpost = H2DRING_TXPOST_MAX_ITEM;
353
354 /** DHD protocol handle. Is an opaque type to other DHD software layers. */
355 typedef struct dhd_prot {
356 osl_t *osh; /* OSL handle */
357 uint16 rxbufpost_sz;
358 uint16 rxbufpost;
359 uint16 max_rxbufpost;
360 uint16 max_eventbufpost;
361 uint16 max_ioctlrespbufpost;
362 uint16 max_tsbufpost;
363 uint16 max_infobufpost;
364 uint16 infobufpost;
365 uint16 cur_event_bufs_posted;
366 uint16 cur_ioctlresp_bufs_posted;
367 uint16 cur_ts_bufs_posted;
368
369 /* Flow control mechanism based on active transmits pending */
370 osl_atomic_t active_tx_count; /* increments/decrements on every packet tx/tx_status */
371 uint16 h2d_max_txpost;
372 uint16 txp_threshold; /* optimization to write "n" tx items at a time to ring */
373
374 /* MsgBuf Ring info: has a dhd_dma_buf that is dynamically allocated */
375 msgbuf_ring_t h2dring_ctrl_subn; /* H2D ctrl message submission ring */
376 msgbuf_ring_t h2dring_rxp_subn; /* H2D RxBuf post ring */
377 msgbuf_ring_t d2hring_ctrl_cpln; /* D2H ctrl completion ring */
378 msgbuf_ring_t d2hring_tx_cpln; /* D2H Tx complete message ring */
379 msgbuf_ring_t d2hring_rx_cpln; /* D2H Rx complete message ring */
380 msgbuf_ring_t *h2dring_info_subn; /* H2D info submission ring */
381 msgbuf_ring_t *d2hring_info_cpln; /* D2H info completion ring */
382
383 msgbuf_ring_t *h2d_flowrings_pool; /* Pool of preallocated flowrings */
384 dhd_dma_buf_t flowrings_dma_buf; /* Contiguous DMA buffer for flowrings */
385 uint16 h2d_rings_total; /* total H2D (common rings + flowrings) */
386
387 uint32 rx_dataoffset;
388
389 dhd_mb_ring_t mb_ring_fn; /* called when dongle needs to be notified of new msg */
390 dhd_mb_ring_2_t mb_2_ring_fn; /* called when dongle needs to be notified of new msg */
391
392 /* ioctl related resources */
393 uint8 ioctl_state;
394 int16 ioctl_status; /* status returned from dongle */
395 uint16 ioctl_resplen;
396 dhd_ioctl_recieved_status_t ioctl_received;
397 uint curr_ioctl_cmd;
398 dhd_dma_buf_t retbuf; /* For holding ioctl response */
399 dhd_dma_buf_t ioctbuf; /* For holding ioctl request */
400
401 dhd_dma_buf_t d2h_dma_scratch_buf; /* For holding d2h scratch */
402
403 /* DMA-able arrays for holding WR and RD indices */
404 uint32 rw_index_sz; /* Size of a RD or WR index in dongle */
405 dhd_dma_buf_t h2d_dma_indx_wr_buf; /* Array of H2D WR indices */
406 dhd_dma_buf_t h2d_dma_indx_rd_buf; /* Array of H2D RD indices */
407 dhd_dma_buf_t d2h_dma_indx_wr_buf; /* Array of D2H WR indices */
408 dhd_dma_buf_t d2h_dma_indx_rd_buf; /* Array of D2H RD indices */
409 dhd_dma_buf_t h2d_ifrm_indx_wr_buf; /* Array of H2D WR indices for ifrm */
410
411 dhd_dma_buf_t host_bus_throughput_buf; /* bus throughput measure buffer */
412
413 dhd_dma_buf_t *flowring_buf; /* pool of flow ring buf */
414 uint32 flowring_num;
415
416 d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */
417 ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */
418 ulong d2h_sync_wait_tot; /* total wait loops */
419
420 dhd_dmaxfer_t dmaxfer; /* for test/DMA loopback */
421
422 uint16 ioctl_seq_no;
423 uint16 data_seq_no;
424 uint16 ioctl_trans_id;
425 void *pktid_ctrl_map; /* a pktid maps to a packet and its metadata */
426 void *pktid_rx_map; /* pktid map for rx path */
427 void *pktid_tx_map; /* pktid map for tx path */
428 bool metadata_dbg;
429 void *pktid_map_handle_ioctl;
430 #ifdef DHD_MAP_PKTID_LOGGING
431 void *pktid_dma_map; /* pktid map for DMA MAP */
432 void *pktid_dma_unmap; /* pktid map for DMA UNMAP */
433 #endif /* DHD_MAP_PKTID_LOGGING */
434
435 uint64 ioctl_fillup_time; /* timestamp for ioctl fillup */
436 uint64 ioctl_ack_time; /* timestamp for ioctl ack */
437 uint64 ioctl_cmplt_time; /* timestamp for ioctl completion */
438
439 /* Applications/utilities can read tx and rx metadata using IOVARs */
440 uint16 rx_metadata_offset;
441 uint16 tx_metadata_offset;
442
443 #if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
444 /* Host's soft doorbell configuration */
445 bcmpcie_soft_doorbell_t soft_doorbell[BCMPCIE_D2H_COMMON_MSGRINGS];
446 #endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
447
448 /* Work Queues to be used by the producer and the consumer, and threshold
449 * when the WRITE index must be synced to consumer's workq
450 */
451 #if defined(DHD_LB_TXC)
452 uint32 tx_compl_prod_sync ____cacheline_aligned;
453 bcm_workq_t tx_compl_prod, tx_compl_cons;
454 #endif /* DHD_LB_TXC */
455 #if defined(DHD_LB_RXC)
456 uint32 rx_compl_prod_sync ____cacheline_aligned;
457 bcm_workq_t rx_compl_prod, rx_compl_cons;
458 #endif /* DHD_LB_RXC */
459
460 dhd_dma_buf_t fw_trap_buf; /* firmware trap buffer */
461
462 uint32 host_ipc_version; /* Host supported IPC rev */
463 uint32 device_ipc_version; /* FW supported IPC rev */
464 uint32 active_ipc_version; /* Host advertised IPC rev */
465 dhd_dma_buf_t hostts_req_buf; /* For holding host timestamp request buf */
466 bool hostts_req_buf_inuse;
467 bool rx_ts_log_enabled;
468 bool tx_ts_log_enabled;
469 bool no_retry;
470 bool no_aggr;
471 bool fixed_rate;
472 } dhd_prot_t;
473
474 #ifdef DHD_DUMP_PCIE_RINGS
475 static
476 int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, unsigned long *file_posn);
477 #endif /* DHD_DUMP_PCIE_RINGS */
478
479 extern bool dhd_timesync_delay_post_bufs(dhd_pub_t *dhdp);
480 extern void dhd_schedule_dmaxfer_free(dhd_pub_t* dhdp, dmaxref_mem_map_t *dmmap);
481 /* Convert a dmaaddr_t to a base_addr with htol operations */
482 static INLINE void dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa);
483
484 /* APIs for managing a DMA-able buffer */
485 static int dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
486 static int dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len);
487 static void dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
488 static void dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
489
490 /* msgbuf ring management */
491 static int dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring,
492 const char *name, uint16 max_items, uint16 len_item, uint16 ringid);
493 static void dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring);
494 static void dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring);
495 static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring);
496 static void dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void* buf);
497
498 /* Pool of pre-allocated msgbuf_ring_t with DMA-able buffers for Flowrings */
499 static int dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd);
500 static void dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd);
501 static void dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd);
502
503 /* Fetch and Release a flowring msgbuf_ring from flowring pool */
504 static msgbuf_ring_t *dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd,
505 uint16 flowid);
506 /* see also dhd_prot_flowrings_pool_release() in dhd_prot.h */
507
508 /* Producer: Allocate space in a msgbuf ring */
509 static void* dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
510 uint16 nitems, uint16 *alloced, bool exactly_nitems);
511 static void* dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems,
512 uint16 *alloced, bool exactly_nitems);
513
514 /* Consumer: Determine the location where the next message may be consumed */
515 static uint8* dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring,
516 uint32 *available_len);
517
518 /* Producer (WR index update) or Consumer (RD index update) indication */
519 static void dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t *ring,
520 void *p, uint16 len);
521 static void dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring);
522
523 static INLINE int dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
524 dhd_dma_buf_t *dma_buf, uint32 bufsz);
525
526 /* Set/Get a RD or WR index in the array of indices */
527 /* See also: dhd_prot_dma_indx_init() */
528 void dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type,
529 uint16 ringid);
530 static uint16 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid);
531
532 /* Locate a packet given a pktid */
533 static INLINE void *dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype,
534 bool free_pktid);
535 /* Locate a packet given a PktId and free it. */
536 static INLINE void dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send);
537
538 static int dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
539 void *buf, uint len, uint8 action);
540 static int dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
541 void *buf, uint len, uint8 action);
542 static int dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf);
543 static int dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd,
544 void *buf, int ifidx);
545
546 /* Post buffers for Rx, control ioctl response and events */
547 static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msgid, uint32 max_to_post);
548 static void dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub);
549 static void dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub);
550 static void dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid);
551 static int dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid);
552 static int dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *pub);
553
554 static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt);
555
556 /* D2H Message handling */
557 static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len);
558
559 /* D2H Message handlers */
560 static void dhd_prot_noop(dhd_pub_t *dhd, void *msg);
561 static void dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg);
562 static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg);
563 static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg);
564 static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg);
565 static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg);
566 static void dhd_prot_event_process(dhd_pub_t *dhd, void *msg);
567
568 /* Loopback test with dongle */
569 static void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma);
570 static int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, uint srcdelay,
571 uint destdelay, dhd_dmaxfer_t *dma);
572 static void dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg);
573
574 /* Flowring management communication with dongle */
575 static void dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg);
576 static void dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg);
577 static void dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg);
578 static void dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg);
579 static void dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg);
580
581 /* Monitor Mode */
582 #ifdef WL_MONITOR
583 extern bool dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx);
584 extern void dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx);
585 #endif /* WL_MONITOR */
586
587 /* Configure a soft doorbell per D2H ring */
588 static void dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd);
589 static void dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg);
590 static void dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf);
591 static void dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf);
592 static void dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf);
593 static void dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf);
594 static void dhd_prot_detach_info_rings(dhd_pub_t *dhd);
595 static void dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf);
596 static void dhd_prot_process_snapshot_complete(dhd_pub_t *dhd, void *buf);
597
598 typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void *msg);
599
600 /** callback functions for messages generated by the dongle */
601 #define MSG_TYPE_INVALID 0
602
603 static dhd_msgbuf_func_t table_lookup[DHD_PROT_FUNCS] = {
604 dhd_prot_noop, /* 0 is MSG_TYPE_INVALID */
605 dhd_prot_genstatus_process, /* MSG_TYPE_GEN_STATUS */
606 dhd_prot_ringstatus_process, /* MSG_TYPE_RING_STATUS */
607 NULL,
608 dhd_prot_flow_ring_create_response_process, /* MSG_TYPE_FLOW_RING_CREATE_CMPLT */
609 NULL,
610 dhd_prot_flow_ring_delete_response_process, /* MSG_TYPE_FLOW_RING_DELETE_CMPLT */
611 NULL,
612 dhd_prot_flow_ring_flush_response_process, /* MSG_TYPE_FLOW_RING_FLUSH_CMPLT */
613 NULL,
614 dhd_prot_ioctack_process, /* MSG_TYPE_IOCTLPTR_REQ_ACK */
615 NULL,
616 dhd_prot_ioctcmplt_process, /* MSG_TYPE_IOCTL_CMPLT */
617 NULL,
618 dhd_prot_event_process, /* MSG_TYPE_WL_EVENT */
619 NULL,
620 dhd_prot_txstatus_process, /* MSG_TYPE_TX_STATUS */
621 NULL,
622 NULL, /* MSG_TYPE_RX_CMPLT use dedicated handler */
623 NULL,
624 dhd_msgbuf_dmaxfer_process, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */
625 NULL, /* MSG_TYPE_FLOW_RING_RESUME */
626 dhd_prot_process_flow_ring_resume_response, /* MSG_TYPE_FLOW_RING_RESUME_CMPLT */
627 NULL, /* MSG_TYPE_FLOW_RING_SUSPEND */
628 dhd_prot_process_flow_ring_suspend_response, /* MSG_TYPE_FLOW_RING_SUSPEND_CMPLT */
629 NULL, /* MSG_TYPE_INFO_BUF_POST */
630 dhd_prot_process_infobuf_complete, /* MSG_TYPE_INFO_BUF_CMPLT */
631 NULL, /* MSG_TYPE_H2D_RING_CREATE */
632 NULL, /* MSG_TYPE_D2H_RING_CREATE */
633 dhd_prot_process_h2d_ring_create_complete, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */
634 dhd_prot_process_d2h_ring_create_complete, /* MSG_TYPE_D2H_RING_CREATE_CMPLT */
635 NULL, /* MSG_TYPE_H2D_RING_CONFIG */
636 NULL, /* MSG_TYPE_D2H_RING_CONFIG */
637 NULL, /* MSG_TYPE_H2D_RING_CONFIG_CMPLT */
638 dhd_prot_process_d2h_ring_config_complete, /* MSG_TYPE_D2H_RING_CONFIG_CMPLT */
639 NULL, /* MSG_TYPE_H2D_MAILBOX_DATA */
640 dhd_prot_process_d2h_mb_data, /* MSG_TYPE_D2H_MAILBOX_DATA */
641 NULL, /* MSG_TYPE_TIMSTAMP_BUFPOST */
642 NULL, /* MSG_TYPE_HOSTTIMSTAMP */
643 dhd_prot_process_d2h_host_ts_complete, /* MSG_TYPE_HOSTTIMSTAMP_CMPLT */
644 dhd_prot_process_fw_timestamp, /* MSG_TYPE_FIRMWARE_TIMESTAMP */
645 NULL, /* MSG_TYPE_SNAPSHOT_UPLOAD */
646 dhd_prot_process_snapshot_complete, /* MSG_TYPE_SNAPSHOT_CMPLT */
647 };
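
/*
 * Illustrative sketch (not compiled; this is not the driver's actual dispatch
 * routine): how a received work item's msg_type is expected to be dispatched
 * through table_lookup[]. Out-of-range values and NULL entries (e.g.
 * MSG_TYPE_RX_CMPLT, which uses a dedicated handler) must not be called
 * through the table.
 */
#if 0
static void
dhd_prot_dispatch_example(dhd_pub_t *dhd, void *msg, uint8 msg_type)
{
	if ((msg_type < DHD_PROT_FUNCS) && (table_lookup[msg_type] != NULL)) {
		table_lookup[msg_type](dhd, msg);
	} else {
		DHD_ERROR(("%s: no table handler for msg_type %d\n",
			__FUNCTION__, msg_type));
	}
}
#endif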
648
649 #ifdef DHD_RX_CHAINING
650
651 #define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \
652 (dhd_wet_chainable(dhd) && \
653 dhd_rx_pkt_chainable((dhd), (ifidx)) && \
654 !ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) && \
655 !ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \
656 !eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \
657 !eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) && \
658 ((h_prio) == (prio)) && (dhd_ctf_hotbrc_check((dhd), (evh), (ifidx))) && \
659 ((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \
660 (((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IPV6))))
661
662 static INLINE void BCMFASTPATH dhd_rxchain_reset(rxchain_info_t *rxchain);
663 static void BCMFASTPATH dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx);
664 static void BCMFASTPATH dhd_rxchain_commit(dhd_pub_t *dhd);
665
666 #define DHD_PKT_CTF_MAX_CHAIN_LEN 64
667
668 #endif /* DHD_RX_CHAINING */
669
670 #define DHD_LPBKDTDUMP_ON() (dhd_msg_level & DHD_LPBKDTDUMP_VAL)
671
672 static void dhd_prot_h2d_sync_init(dhd_pub_t *dhd);
673
674 #ifdef D2H_MINIDUMP
675 dhd_dma_buf_t *
676 dhd_prot_get_minidump_buf(dhd_pub_t *dhd)
677 {
678 return &dhd->prot->fw_trap_buf;
679 }
680 #endif /* D2H_MINIDUMP */
681
682 bool
683 dhd_prot_is_cmpl_ring_empty(dhd_pub_t *dhd, void *prot_info)
684 {
685 msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)prot_info;
686 uint16 rd, wr;
687 bool ret;
688
689 if (dhd->dma_d2h_ring_upd_support) {
690 wr = flow_ring->wr;
691 } else {
692 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
693 }
694 if (dhd->dma_h2d_ring_upd_support) {
695 rd = flow_ring->rd;
696 } else {
697 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
698 }
699 ret = (wr == rd) ? TRUE : FALSE;
700 return ret;
701 }
702
703 void
704 dhd_prot_dump_ring_ptrs(void *prot_info)
705 {
706 msgbuf_ring_t *ring = (msgbuf_ring_t *)prot_info;
707 DHD_ERROR(("%s curr_rd: %d rd: %d wr: %d \n", __FUNCTION__,
708 ring->curr_rd, ring->rd, ring->wr));
709 }
710
711 uint16
712 dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd)
713 {
714 return (uint16)h2d_max_txpost;
715 }
716 void
717 dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost)
718 {
719 h2d_max_txpost = max_txpost;
720 }
721 /**
722 * D2H DMA to completion callback handlers. Based on the mode advertised by the
723 * dongle through the PCIE shared region, the appropriate callback will be
724 * registered in the proto layer to be invoked prior to processing any message
725 * from a D2H DMA ring. If the dongle uses a read barrier or another mode that
726 * does not require host participation, then a noop callback handler will be
727 * bound that simply returns the msg_type.
728 */
729 static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring,
730 uint32 tries, volatile uchar *msg, int msglen);
731 static uint8 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
732 volatile cmn_msg_hdr_t *msg, int msglen);
733 static uint8 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
734 volatile cmn_msg_hdr_t *msg, int msglen);
735 static uint8 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
736 volatile cmn_msg_hdr_t *msg, int msglen);
737 static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd);
738 static int dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
739 uint16 ring_type, uint32 id);
740 static int dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
741 uint8 type, uint32 id);
742 static uint16 dhd_get_max_flow_rings(dhd_pub_t *dhd);
743
744 /**
745 * dhd_prot_d2h_sync_livelock - when the host determines that a DMA transfer has
746 * not completed, a livelock condition occurs. Host will avert this livelock by
747 * dropping this message and moving to the next. This dropped message can lead
748 * to a packet leak, or even something disastrous if the dropped message
749 * happens to be a control response.
750 * Here we will log this condition. One may choose to reboot the dongle.
751 *
752 */
753 static void
754 dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring, uint32 tries,
755 volatile uchar *msg, int msglen)
756 {
757 uint32 ring_seqnum = ring->seqnum;
758
759 DHD_ERROR((
760 "LIVELOCK DHD<%p> ring<%s> msg_seqnum<%u> ring_seqnum<%u:%u> tries<%u> max<%lu>"
761 " tot<%lu> dma_buf va<%p> msg<%p> curr_rd<%d>\n",
762 dhd, ring->name, msg_seqnum, ring_seqnum, ring_seqnum% D2H_EPOCH_MODULO, tries,
763 dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot,
764 ring->dma_buf.va, msg, ring->curr_rd));
765
766 dhd_prhex("D2H MsgBuf Failure", msg, msglen, DHD_ERROR_VAL);
767
768 dhd_bus_dump_console_buffer(dhd->bus);
769 dhd_prot_debug_info_print(dhd);
770
771 #ifdef DHD_FW_COREDUMP
772 if (dhd->memdump_enabled) {
773 /* collect core dump */
774 dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK;
775 dhd_bus_mem_dump(dhd);
776 }
777 #endif /* DHD_FW_COREDUMP */
778
779 dhd_schedule_reset(dhd);
780
781 #ifdef SUPPORT_LINKDOWN_RECOVERY
782 #ifdef CONFIG_ARCH_MSM
783 dhd->bus->no_cfg_restore = 1;
784 #endif /* CONFIG_ARCH_MSM */
785 dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK;
786 dhd_os_send_hang_message(dhd);
787 #endif /* SUPPORT_LINKDOWN_RECOVERY */
788 }
789
790 /**
791 * dhd_prot_d2h_sync_seqnum - Sync on a D2H DMA completion using the SEQNUM
792 * mode. Sequence number is always in the last word of a message.
793 */
794 static uint8 BCMFASTPATH
795 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
796 volatile cmn_msg_hdr_t *msg, int msglen)
797 {
798 uint32 tries;
799 uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
800 int num_words = msglen / sizeof(uint32); /* num of 32bit words */
801 volatile uint32 *marker = (volatile uint32 *)msg + (num_words - 1); /* last word */
802 dhd_prot_t *prot = dhd->prot;
803 uint32 msg_seqnum;
804 uint32 step = 0;
805 uint32 delay = PCIE_D2H_SYNC_DELAY;
806 uint32 total_tries = 0;
807
808 ASSERT(msglen == ring->item_len);
809
810 BCM_REFERENCE(delay);
811 /*
812 * For retries we have to make some sort of stepper algorithm.
813 * We see that every time the Dongle comes out of the D3
814 * Cold state, the first D2H mem2mem DMA takes more time to
815 * complete, leading to livelock issues.
816 *
817 * Case 1 - Apart from the Host CPU, some other bus master is
818 * accessing the DDR port, probably a page close to the ring,
819 * so PCIE does not get a chance to update the memory.
820 * Solution - Increase the number of tries.
821 *
822 * Case 2 - The 50usec delay given by the Host CPU is not
823 * sufficient for the PCIe RC to start its work.
824 * In this case the breathing time of 50usec given by
825 * the Host CPU is not sufficient.
826 * Solution: Increase the delay in a stepper fashion.
827 * This is done to ensure that there are no
828 * unwanted extra delay introduced in normal conditions.
829 */
830 for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
831 for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
832 msg_seqnum = *marker;
833 if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma upto last word done */
834 ring->seqnum++; /* next expected sequence number */
835 goto dma_completed;
836 }
837
838 total_tries = (uint32)(((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries);
839
840 if (total_tries > prot->d2h_sync_wait_max)
841 prot->d2h_sync_wait_max = total_tries;
842
843 OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
844 OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
845 OSL_DELAY(delay * step); /* Add stepper delay */
846
847 } /* for PCIE_D2H_SYNC_WAIT_TRIES */
848 } /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
849
850 dhd_prot_d2h_sync_livelock(dhd, msg_seqnum, ring, total_tries,
851 (volatile uchar *) msg, msglen);
852
853 ring->seqnum++; /* skip this message ... leak of a pktid */
854 return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
855
856 dma_completed:
857
858 prot->d2h_sync_wait_tot += tries;
859 return msg->msg_type;
860 }
861
862 /**
863 * dhd_prot_d2h_sync_xorcsum - Sync on a D2H DMA completion using the XORCSUM
864 * mode. The xorcsum is placed in the last word of a message. Dongle will also
865 * place a seqnum in the epoch field of the cmn_msg_hdr.
866 */
867 static uint8 BCMFASTPATH
868 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
869 volatile cmn_msg_hdr_t *msg, int msglen)
870 {
871 uint32 tries;
872 uint32 prot_checksum = 0; /* computed checksum */
873 int num_words = msglen / sizeof(uint32); /* num of 32bit words */
874 uint8 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
875 dhd_prot_t *prot = dhd->prot;
876 uint32 step = 0;
877 uint32 delay = PCIE_D2H_SYNC_DELAY;
878 uint32 total_tries = 0;
879
880 ASSERT(msglen == ring->item_len);
881
882 BCM_REFERENCE(delay);
883 /*
884 * For retries we have to make some sort of stepper algorithm.
885 * We see that every time the Dongle comes out of the D3
886 * Cold state, the first D2H mem2mem DMA takes more time to
887 * complete, leading to livelock issues.
888 *
889 * Case 1 - Apart from the Host CPU, some other bus master is
890 * accessing the DDR port, probably a page close to the ring,
891 * so PCIE does not get a chance to update the memory.
892 * Solution - Increase the number of tries.
893 *
894 * Case 2 - The 50usec delay given by the Host CPU is not
895 * sufficient for the PCIe RC to start its work.
896 * In this case the breathing time of 50usec given by
897 * the Host CPU is not sufficient.
898 * Solution: Increase the delay in a stepper fashion.
899 * This is done to ensure that there are no
900 * unwanted extra delay introduced in normal conditions.
901 */
902 for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
903 for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
904 /* First verify whether the seqnum has been updated,
905 * and only then check the xorcsum.
906 * Once both seqnum and xorcsum are correct, the
907 * complete message has arrived.
908 */
909 if (msg->epoch == ring_seqnum) {
910 prot_checksum = bcm_compute_xor32((volatile uint32 *)msg,
911 num_words);
912 if (prot_checksum == 0U) { /* checksum is OK */
913 ring->seqnum++; /* next expected sequence number */
914 goto dma_completed;
915 }
916 }
917
918 total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
919
920 if (total_tries > prot->d2h_sync_wait_max)
921 prot->d2h_sync_wait_max = total_tries;
922
923 OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
924 OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
925 OSL_DELAY(delay * step); /* Add stepper delay */
926
927 } /* for PCIE_D2H_SYNC_WAIT_TRIES */
928 } /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
929
930 DHD_ERROR(("%s: prot_checksum = 0x%x\n", __FUNCTION__, prot_checksum));
931 dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries,
932 (volatile uchar *) msg, msglen);
933
934 ring->seqnum++; /* skip this message ... leak of a pktid */
935 return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
936
937 dma_completed:
938
939 prot->d2h_sync_wait_tot += tries;
940 return msg->msg_type;
941 }
942
943 /**
944 * dhd_prot_d2h_sync_none - Dongle ensures that the DMA will complete and the host
945 * need not try to sync. This noop sync handler will be bound when the dongle
946 * advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is required.
947 */
948 static uint8 BCMFASTPATH
949 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
950 volatile cmn_msg_hdr_t *msg, int msglen)
951 {
952 return msg->msg_type;
953 }
954
955 INLINE void
956 dhd_wakeup_ioctl_event(dhd_pub_t *dhd, dhd_ioctl_recieved_status_t reason)
957 {
958 /* To synchronize with the previous memory operations call wmb() */
959 OSL_SMP_WMB();
960 dhd->prot->ioctl_received = reason;
961 /* Call another wmb() to make sure the event value gets updated before waking up the waiter */
962 OSL_SMP_WMB();
963 dhd_os_ioctl_resp_wake(dhd);
964 }
965
966 /**
967 * dhd_prot_d2h_sync_init - Setup the host side DMA sync mode based on what
968 * dongle advertises.
969 */
970 static void
971 dhd_prot_d2h_sync_init(dhd_pub_t *dhd)
972 {
973 dhd_prot_t *prot = dhd->prot;
974 prot->d2h_sync_wait_max = 0UL;
975 prot->d2h_sync_wait_tot = 0UL;
976
977 prot->d2hring_ctrl_cpln.seqnum = D2H_EPOCH_INIT_VAL;
978 prot->d2hring_ctrl_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
979
980 prot->d2hring_tx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
981 prot->d2hring_tx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
982
983 prot->d2hring_rx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
984 prot->d2hring_rx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
985
986 if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) {
987 prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum;
988 DHD_ERROR(("%s(): D2H sync mechanism is SEQNUM \r\n", __FUNCTION__));
989 } else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) {
990 prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum;
991 DHD_ERROR(("%s(): D2H sync mechanism is XORCSUM \r\n", __FUNCTION__));
992 } else {
993 prot->d2h_sync_cb = dhd_prot_d2h_sync_none;
994 DHD_ERROR(("%s(): D2H sync mechanism is NONE \r\n", __FUNCTION__));
995 }
996 }
997
998 /**
999 * dhd_prot_h2d_sync_init - Per H2D common ring, setup the msgbuf ring seqnum
1000 */
1001 static void
1002 dhd_prot_h2d_sync_init(dhd_pub_t *dhd)
1003 {
1004 dhd_prot_t *prot = dhd->prot;
1005 prot->h2dring_rxp_subn.seqnum = H2D_EPOCH_INIT_VAL;
1006 prot->h2dring_rxp_subn.current_phase = 0;
1007
1008 prot->h2dring_ctrl_subn.seqnum = H2D_EPOCH_INIT_VAL;
1009 prot->h2dring_ctrl_subn.current_phase = 0;
1010 }
1011
1012 /* +----------------- End of PCIE DHD H2D DMA SYNC ------------------------+ */
1013
1014 /*
1015 * +---------------------------------------------------------------------------+
1016 * PCIE DMA-able buffer. Sets up a dhd_dma_buf_t object, which includes the
1017 * virtual and physical address, the buffer length and the DMA handler.
1018 * A secdma handler is also included in the dhd_dma_buf object.
1019 * +---------------------------------------------------------------------------+
1020 */
1021
1022 static INLINE void
1023 dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa)
1024 {
1025 base_addr->low_addr = htol32(PHYSADDRLO(pa));
1026 base_addr->high_addr = htol32(PHYSADDRHI(pa));
1027 }
1028
1029 /**
1030 * dhd_dma_buf_audit - Any audits on a DHD DMA Buffer.
1031 */
1032 static int
1033 dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
1034 {
1035 uint32 pa_lowaddr, end; /* dongle uses 32bit ptr arithmetic */
1036 ASSERT(dma_buf);
1037 pa_lowaddr = PHYSADDRLO(dma_buf->pa);
1038 ASSERT(PHYSADDRLO(dma_buf->pa) || PHYSADDRHI(dma_buf->pa));
1039 ASSERT(ISALIGNED(pa_lowaddr, DMA_ALIGN_LEN));
1040 ASSERT(dma_buf->len != 0);
1041
1042 /* test 32bit offset arithmetic over dma buffer for loss of carry-over */
1043 end = (pa_lowaddr + dma_buf->len); /* end address */
1044
1045 if ((end & 0xFFFFFFFF) < (pa_lowaddr & 0xFFFFFFFF)) { /* exclude carryover */
1046 DHD_ERROR(("%s: dma_buf %x len %d spans dongle 32bit ptr arithmetic\n",
1047 __FUNCTION__, pa_lowaddr, dma_buf->len));
1048 return BCME_ERROR;
1049 }
1050
1051 return BCME_OK;
1052 }
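
/*
 * Worked example (illustrative only) of the carry-over check above: with
 * pa_lowaddr = 0xFFFFE000 and len = 0x4000, end wraps to 0x2000 in 32-bit
 * arithmetic, and 0x2000 < 0xFFFFE000, so the buffer would straddle a 4GB
 * boundary for the dongle's 32-bit pointer arithmetic and is rejected.
 */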
1053
1054 /**
1055 * dhd_dma_buf_alloc - Allocate a cache coherent DMA-able buffer.
1056 * returns BCME_OK=0 on success
1057 * returns non-zero negative error value on failure.
1058 */
1059 static int
1060 dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len)
1061 {
1062 uint32 dma_pad = 0;
1063 osl_t *osh = dhd->osh;
1064 uint16 dma_align = DMA_ALIGN_LEN;
1065
1066 ASSERT(dma_buf != NULL);
1067 ASSERT(dma_buf->va == NULL);
1068 ASSERT(dma_buf->len == 0);
1069
1070 /* Pad the buffer length by one extra cacheline size.
1071 * Required for D2H direction.
1072 */
1073 dma_pad = (buf_len % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
1074 dma_buf->va = DMA_ALLOC_CONSISTENT(osh, buf_len + dma_pad,
1075 dma_align, &dma_buf->_alloced, &dma_buf->pa, &dma_buf->dmah);
1076
1077 if (dma_buf->va == NULL) {
1078 DHD_ERROR(("%s: buf_len %d, no memory available\n",
1079 __FUNCTION__, buf_len));
1080 return BCME_NOMEM;
1081 }
1082
1083 dma_buf->len = buf_len; /* not including padded len */
1084
1085 if (dhd_dma_buf_audit(dhd, dma_buf) != BCME_OK) { /* audit dma buf */
1086 dhd_dma_buf_free(dhd, dma_buf);
1087 return BCME_ERROR;
1088 }
1089
1090 dhd_dma_buf_reset(dhd, dma_buf); /* zero out and cache flush */
1091
1092 return BCME_OK;
1093 }
1094
1095 /**
1096 * dhd_dma_buf_reset - Reset a cache coherent DMA-able buffer.
1097 */
1098 static void
1099 dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
1100 {
1101 if ((dma_buf == NULL) || (dma_buf->va == NULL))
1102 return;
1103
1104 (void)dhd_dma_buf_audit(dhd, dma_buf);
1105
1106 /* Zero out the entire buffer and cache flush */
1107 memset((void*)dma_buf->va, 0, dma_buf->len);
1108 OSL_CACHE_FLUSH((void *)dma_buf->va, dma_buf->len);
1109 }
1110
1111 /**
1112 * dhd_dma_buf_free - Free a DMA-able buffer that was previously allocated using
1113 * dhd_dma_buf_alloc().
1114 */
1115 static void
1116 dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
1117 {
1118 osl_t *osh = dhd->osh;
1119
1120 ASSERT(dma_buf);
1121
1122 if (dma_buf->va == NULL)
1123 return; /* Allow for free invocation, when alloc failed */
1124
1125 /* DEBUG: dhd_dma_buf_reset(dhd, dma_buf) */
1126 (void)dhd_dma_buf_audit(dhd, dma_buf);
1127
1128 /* dma buffer may have been padded at allocation */
1129 DMA_FREE_CONSISTENT(osh, dma_buf->va, dma_buf->_alloced,
1130 dma_buf->pa, dma_buf->dmah);
1131
1132 memset(dma_buf, 0, sizeof(dhd_dma_buf_t));
1133 }
1134
1135 /**
1136 * dhd_dma_buf_init - Initialize a dhd_dma_buf with specified values.
1137 * Do not use dhd_dma_buf_init to zero out a dhd_dma_buf_t object. Use memset 0.
1138 */
1139 void
1140 dhd_dma_buf_init(dhd_pub_t *dhd, void *dhd_dma_buf,
1141 void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma)
1142 {
1143 dhd_dma_buf_t *dma_buf;
1144 ASSERT(dhd_dma_buf);
1145 dma_buf = (dhd_dma_buf_t *)dhd_dma_buf;
1146 dma_buf->va = va;
1147 dma_buf->len = len;
1148 dma_buf->pa = pa;
1149 dma_buf->dmah = dmah;
1150 dma_buf->secdma = secdma;
1151
1152 /* Audit user defined configuration */
1153 (void)dhd_dma_buf_audit(dhd, dma_buf);
1154 }
1155
1156 /* +------------------ End of PCIE DHD DMA BUF ADT ------------------------+ */
1157
1158 /*
1159 * +---------------------------------------------------------------------------+
1160 * DHD_MAP_PKTID_LOGGING
1161 * Logging of PKTID and DMA map/unmap information for debugging SMMU fault
1162 * issues on customer platforms.
1163 * +---------------------------------------------------------------------------+
1164 */
1165
1166 #ifdef DHD_MAP_PKTID_LOGGING
1167 typedef struct dhd_pktid_log_item {
1168 dmaaddr_t pa; /* DMA bus address */
1169 uint64 ts_nsec; /* Timestamp: nsec */
1170 uint32 size; /* DMA map/unmap size */
1171 uint32 pktid; /* Packet ID */
1172 uint8 pkttype; /* Packet Type */
1173 uint8 rsvd[7]; /* Reserved for future use */
1174 } dhd_pktid_log_item_t;
1175
1176 typedef struct dhd_pktid_log {
1177 uint32 items; /* number of total items */
1178 uint32 index; /* index of pktid_log_item */
1179 dhd_pktid_log_item_t map[0]; /* metadata storage */
1180 } dhd_pktid_log_t;
1181
1182 typedef void * dhd_pktid_log_handle_t; /* opaque handle to pktid log */
1183
1184 #define MAX_PKTID_LOG (2048)
1185 #define DHD_PKTID_LOG_ITEM_SZ (sizeof(dhd_pktid_log_item_t))
1186 #define DHD_PKTID_LOG_SZ(items) (uint32)((sizeof(dhd_pktid_log_t)) + \
1187 ((DHD_PKTID_LOG_ITEM_SZ) * (items)))
1188
1189 #define DHD_PKTID_LOG_INIT(dhd, hdl) dhd_pktid_logging_init((dhd), (hdl))
1190 #define DHD_PKTID_LOG_FINI(dhd, hdl) dhd_pktid_logging_fini((dhd), (hdl))
1191 #define DHD_PKTID_LOG(dhd, hdl, pa, pktid, len, pkttype) \
1192 dhd_pktid_logging((dhd), (hdl), (pa), (pktid), (len), (pkttype))
1193 #define DHD_PKTID_LOG_DUMP(dhd) dhd_pktid_logging_dump((dhd))
1194
1195 static dhd_pktid_log_handle_t *
1196 dhd_pktid_logging_init(dhd_pub_t *dhd, uint32 num_items)
1197 {
1198 dhd_pktid_log_t *log;
1199 uint32 log_size;
1200
1201 log_size = DHD_PKTID_LOG_SZ(num_items);
1202 log = (dhd_pktid_log_t *)MALLOCZ(dhd->osh, log_size);
1203 if (log == NULL) {
1204 DHD_ERROR(("%s: MALLOC failed for size %d\n",
1205 __FUNCTION__, log_size));
1206 return (dhd_pktid_log_handle_t *)NULL;
1207 }
1208
1209 log->items = num_items;
1210 log->index = 0;
1211
1212 return (dhd_pktid_log_handle_t *)log; /* opaque handle */
1213 }
1214
1215 static void
1216 dhd_pktid_logging_fini(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle)
1217 {
1218 dhd_pktid_log_t *log;
1219 uint32 log_size;
1220
1221 if (handle == NULL) {
1222 DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__));
1223 return;
1224 }
1225
1226 log = (dhd_pktid_log_t *)handle;
1227 log_size = DHD_PKTID_LOG_SZ(log->items);
1228 MFREE(dhd->osh, handle, log_size);
1229 }
1230
1231 static void
1232 dhd_pktid_logging(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle, dmaaddr_t pa,
1233 uint32 pktid, uint32 len, uint8 pkttype)
1234 {
1235 dhd_pktid_log_t *log;
1236 uint32 idx;
1237
1238 if (handle == NULL) {
1239 DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__));
1240 return;
1241 }
1242
1243 log = (dhd_pktid_log_t *)handle;
1244 idx = log->index;
1245 log->map[idx].ts_nsec = OSL_LOCALTIME_NS();
1246 log->map[idx].pa = pa;
1247 log->map[idx].pktid = pktid;
1248 log->map[idx].size = len;
1249 log->map[idx].pkttype = pkttype;
1250 log->index = (idx + 1) % (log->items); /* update index */
1251 }
1252
1253 void
1254 dhd_pktid_logging_dump(dhd_pub_t *dhd)
1255 {
1256 dhd_prot_t *prot = dhd->prot;
1257 dhd_pktid_log_t *map_log, *unmap_log;
1258 uint64 ts_sec, ts_usec;
1259
1260 if (prot == NULL) {
1261 DHD_ERROR(("%s: prot is NULL\n", __FUNCTION__));
1262 return;
1263 }
1264
1265 map_log = (dhd_pktid_log_t *)(prot->pktid_dma_map);
1266 unmap_log = (dhd_pktid_log_t *)(prot->pktid_dma_unmap);
1267 OSL_GET_LOCALTIME(&ts_sec, &ts_usec);
1268 if (map_log && unmap_log) {
1269 DHD_ERROR(("%s: map_idx=%d unmap_idx=%d "
1270 "current time=[%5lu.%06lu]\n", __FUNCTION__,
1271 map_log->index, unmap_log->index,
1272 (unsigned long)ts_sec, (unsigned long)ts_usec));
1273 DHD_ERROR(("%s: pktid_map_log(pa)=0x%llx size=%d, "
1274 "pktid_unmap_log(pa)=0x%llx size=%d\n", __FUNCTION__,
1275 (uint64)__virt_to_phys((ulong)(map_log->map)),
1276 (uint32)(DHD_PKTID_LOG_ITEM_SZ * map_log->items),
1277 (uint64)__virt_to_phys((ulong)(unmap_log->map)),
1278 (uint32)(DHD_PKTID_LOG_ITEM_SZ * unmap_log->items)));
1279 }
1280 }
1281 #endif /* DHD_MAP_PKTID_LOGGING */
1282
1283 /* +----------------- End of DHD_MAP_PKTID_LOGGING -----------------------+ */
1284
1285 /*
1286 * +---------------------------------------------------------------------------+
1287 * PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
1288 * Its main purpose is to save memory on the dongle; it has other purposes as well.
1289 * The packet id map, also includes storage for some packet parameters that
1290 * may be saved. A native packet pointer along with the parameters may be saved
1291 * and a unique 32bit pkt id will be returned. Later, the saved packet pointer
1292 * and the metadata may be retrieved using the previously allocated packet id.
1293 * +---------------------------------------------------------------------------+
1294 */
1295 #define DHD_PCIE_PKTID
1296 #define MAX_CTRL_PKTID (1024) /* Maximum number of pktids supported */
1297 #define MAX_RX_PKTID (1024)
1298 #define MAX_TX_PKTID (3072 * 2)
1299
1300 /* On Router, the pktptr serves as a pktid. */
1301
1302 #if defined(PROP_TXSTATUS) && !defined(DHD_PCIE_PKTID)
1303 #error "PKTIDMAP must be supported with PROP_TXSTATUS/WLFC"
1304 #endif // endif
1305
1306 /* Enum for marking the buffer color based on usage */
1307 typedef enum dhd_pkttype {
1308 PKTTYPE_DATA_TX = 0,
1309 PKTTYPE_DATA_RX,
1310 PKTTYPE_IOCTL_RX,
1311 PKTTYPE_EVENT_RX,
1312 PKTTYPE_INFO_RX,
1313 /* no check in dhd_prot_pkt_free, for the case where a pktid is reserved but no space is avail */
1314 PKTTYPE_NO_CHECK,
1315 PKTTYPE_TSBUF_RX
1316 } dhd_pkttype_t;
1317
1318 #define DHD_PKTID_INVALID (0U)
1319 #define DHD_IOCTL_REQ_PKTID (0xFFFE)
1320 #define DHD_FAKE_PKTID (0xFACE)
1321 #define DHD_H2D_DBGRING_REQ_PKTID 0xFFFD
1322 #define DHD_D2H_DBGRING_REQ_PKTID 0xFFFC
1323 #define DHD_H2D_HOSTTS_REQ_PKTID 0xFFFB
1324 #define DHD_H2D_BTLOGRING_REQ_PKTID 0xFFFA
1325 #define DHD_D2H_BTLOGRING_REQ_PKTID 0xFFF9
1326 #define DHD_H2D_SNAPSHOT_UPLOAD_REQ_PKTID 0xFFF8
1327
1328 #define IS_FLOWRING(ring) \
1329 ((strncmp(ring->name, "h2dflr", sizeof("h2dflr"))) == (0))
1330
1331 typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */
1332
1333 /* Construct a packet id mapping table, returning an opaque map handle */
1334 static dhd_pktid_map_handle_t *dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items);
1335
1336 /* Destroy a packet id mapping table, freeing all packets active in the table */
1337 static void dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map);
1338
1339 #define DHD_NATIVE_TO_PKTID_INIT(dhd, items) dhd_pktid_map_init((dhd), (items))
1340 #define DHD_NATIVE_TO_PKTID_RESET(dhd, map) dhd_pktid_map_reset((dhd), (map))
1341 #define DHD_NATIVE_TO_PKTID_FINI(dhd, map) dhd_pktid_map_fini((dhd), (map))
1342 #define DHD_NATIVE_TO_PKTID_FINI_IOCTL(osh, map) dhd_pktid_map_fini_ioctl((osh), (map))
1343
1344 #ifdef MACOSX_DHD
1345 #undef DHD_PCIE_PKTID
1346 #define DHD_PCIE_PKTID 1
1347 #endif /* MACOSX_DHD */
1348
1349 #if defined(DHD_PCIE_PKTID)
1350 #if defined(MACOSX_DHD)
1351 #define IOCTLRESP_USE_CONSTMEM
1352 static void free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
1353 static int alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
1354 #endif // endif
1355
1356 /* Determine number of pktids that are available */
1357 static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle);
1358
1359 /* Allocate a unique pktid against which a pkt and some metadata is saved */
1360 static INLINE uint32 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
1361 void *pkt, dhd_pkttype_t pkttype);
1362 static INLINE void dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
1363 void *pkt, uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dma,
1364 void *dmah, void *secdma, dhd_pkttype_t pkttype);
1365 static uint32 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
1366 void *pkt, dmaaddr_t pa, uint32 len, uint8 dma,
1367 void *dmah, void *secdma, dhd_pkttype_t pkttype);
1368
1369 /* Return an allocated pktid, retrieving previously saved pkt and metadata */
1370 static void *dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
1371 uint32 id, dmaaddr_t *pa, uint32 *len, void **dmah,
1372 void **secdma, dhd_pkttype_t pkttype, bool rsv_locker);
1373
1374 /*
1375 * DHD_PKTID_AUDIT_ENABLED: Audit of PktIds in DHD for duplicate alloc and frees
1376 *
1377 * DHD_PKTID_AUDIT_MAP: Audit the LIFO or FIFO PktIdMap allocator
1378 * DHD_PKTID_AUDIT_RING: Audit the pktid during producer/consumer ring operation
1379 *
1380 * CAUTION: When DHD_PKTID_AUDIT_ENABLED is defined,
1381 * either DHD_PKTID_AUDIT_MAP or DHD_PKTID_AUDIT_RING may be selected.
1382 */
1383 #if defined(DHD_PKTID_AUDIT_ENABLED)
1384 #define USE_DHD_PKTID_AUDIT_LOCK 1
1385 /* Audit the pktidmap allocator */
1386 /* #define DHD_PKTID_AUDIT_MAP */
1387
1388 /* Audit the pktid during production/consumption of workitems */
1389 #define DHD_PKTID_AUDIT_RING
1390
1391 #if defined(DHD_PKTID_AUDIT_MAP) && defined(DHD_PKTID_AUDIT_RING)
1392 #error "May only enabled audit of MAP or RING, at a time."
1393 #endif /* DHD_PKTID_AUDIT_MAP && DHD_PKTID_AUDIT_RING */
1394
1395 #define DHD_DUPLICATE_ALLOC 1
1396 #define DHD_DUPLICATE_FREE 2
1397 #define DHD_TEST_IS_ALLOC 3
1398 #define DHD_TEST_IS_FREE 4
1399
1400 typedef enum dhd_pktid_map_type {
1401 DHD_PKTID_MAP_TYPE_CTRL = 1,
1402 DHD_PKTID_MAP_TYPE_TX,
1403 DHD_PKTID_MAP_TYPE_RX,
1404 DHD_PKTID_MAP_TYPE_UNKNOWN
1405 } dhd_pktid_map_type_t;
1406
1407 #ifdef USE_DHD_PKTID_AUDIT_LOCK
1408 #define DHD_PKTID_AUDIT_LOCK_INIT(osh) dhd_os_spin_lock_init(osh)
1409 #define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock)
1410 #define DHD_PKTID_AUDIT_LOCK(lock) dhd_os_spin_lock(lock)
1411 #define DHD_PKTID_AUDIT_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags)
1412 #else
1413 #define DHD_PKTID_AUDIT_LOCK_INIT(osh) (void *)(1)
1414 #define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock) do { /* noop */ } while (0)
1415 #define DHD_PKTID_AUDIT_LOCK(lock) 0
1416 #define DHD_PKTID_AUDIT_UNLOCK(lock, flags) do { /* noop */ } while (0)
1417 #endif /* !USE_DHD_PKTID_AUDIT_LOCK */
1418
1419 #endif /* DHD_PKTID_AUDIT_ENABLED */
1420
1421 #define USE_DHD_PKTID_LOCK 1
1422
1423 #ifdef USE_DHD_PKTID_LOCK
1424 #define DHD_PKTID_LOCK_INIT(osh) dhd_os_spin_lock_init(osh)
1425 #define DHD_PKTID_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock)
1426 #define DHD_PKTID_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock)
1427 #define DHD_PKTID_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags)
1428 #else
1429 #define DHD_PKTID_LOCK_INIT(osh) (void *)(1)
1430 #define DHD_PKTID_LOCK_DEINIT(osh, lock) \
1431 do { \
1432 BCM_REFERENCE(osh); \
1433 BCM_REFERENCE(lock); \
1434 } while (0)
1435 #define DHD_PKTID_LOCK(lock, flags) ((flags) = 0)
1436 #define DHD_PKTID_UNLOCK(lock, flags) \
1437 do { \
1438 BCM_REFERENCE(lock); \
1439 BCM_REFERENCE(flags); \
1440 } while (0)
1441 #endif /* !USE_DHD_PKTID_LOCK */
1442
1443 typedef enum dhd_locker_state {
1444 LOCKER_IS_FREE,
1445 LOCKER_IS_BUSY,
1446 LOCKER_IS_RSVD
1447 } dhd_locker_state_t;
1448
1449 /* Packet metadata saved in packet id mapper */
1450
1451 typedef struct dhd_pktid_item {
1452 dhd_locker_state_t state; /* tag a locker to be free, busy or reserved */
1453 uint8 dir; /* dma map direction (Tx=flush or Rx=invalidate) */
1454 dhd_pkttype_t pkttype; /* pktlists are maintained based on pkttype */
1455 uint16 len; /* length of mapped packet's buffer */
1456 void *pkt; /* opaque native pointer to a packet */
1457 dmaaddr_t pa; /* physical address of mapped packet's buffer */
1458 void *dmah; /* handle to OS specific DMA map */
1459 void *secdma; /* secure dma handle */
1460 } dhd_pktid_item_t;
1461
1462 typedef uint32 dhd_pktid_key_t;
1463
1464 typedef struct dhd_pktid_map {
1465 uint32 items; /* total items in map */
1466 uint32 avail; /* total available items */
1467 int failures; /* lockers unavailable count */
1468 /* Spinlock to protect dhd_pktid_map in process/tasklet context */
1469 void *pktid_lock; /* Used when USE_DHD_PKTID_LOCK is defined */
1470
1471 #if defined(DHD_PKTID_AUDIT_ENABLED)
1472 void *pktid_audit_lock;
1473 struct bcm_mwbmap *pktid_audit; /* multi word bitmap based audit */
1474 #endif /* DHD_PKTID_AUDIT_ENABLED */
1475 dhd_pktid_key_t *keys; /* map_items +1 unique pkt ids */
1476 dhd_pktid_item_t lockers[0]; /* metadata storage */
1477 } dhd_pktid_map_t;
1478
1479 /*
1480 * PktId (Locker) #0 is never allocated and is considered invalid.
1481 *
1482 * On request for a pktid, a value DHD_PKTID_INVALID must be treated as a
1483 * depleted pktid pool and must not be used by the caller.
1484 *
1485 * Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID.
1486 */
1487
1488 #define DHD_PKTID_FREE_LOCKER (FALSE)
1489 #define DHD_PKTID_RSV_LOCKER (TRUE)
1490
1491 #define DHD_PKTID_ITEM_SZ (sizeof(dhd_pktid_item_t))
1492 #define DHD_PKIDMAP_ITEMS(items) (items)
1493 #define DHD_PKTID_MAP_SZ(items) (sizeof(dhd_pktid_map_t) + \
1494 (DHD_PKTID_ITEM_SZ * ((items) + 1)))
1495 #define DHD_PKTIDMAP_KEYS_SZ(items) (sizeof(dhd_pktid_key_t) * ((items) + 1))
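
/*
 * Sizing sketch (illustrative): with num_items == 4 the allocator reserves
 * locker #0 for DHD_PKTID_INVALID plus 4 usable lockers, so
 *
 *   DHD_PKTID_MAP_SZ(4)     == sizeof(dhd_pktid_map_t) + 5 * DHD_PKTID_ITEM_SZ
 *   DHD_PKTIDMAP_KEYS_SZ(4) == 5 * sizeof(dhd_pktid_key_t)
 *
 * i.e. both the lockers[] array and the keys[] array are dimensioned for
 * (items + 1) entries.
 */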
1496
1497 #define DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, map) dhd_pktid_map_reset_ioctl((dhd), (map))
1498
1499 /* Convert a packet to a pktid, and save pkt pointer in busy locker */
1500 #define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype) \
1501 dhd_pktid_map_reserve((dhd), (map), (pkt), (pkttype))
1502 /* Reuse a previously reserved locker to save packet params */
1503 #define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dir, dmah, secdma, pkttype) \
1504 dhd_pktid_map_save((dhd), (map), (void *)(pkt), (nkey), (pa), (uint32)(len), \
1505 (uint8)(dir), (void *)(dmah), (void *)(secdma), \
1506 (dhd_pkttype_t)(pkttype))
1507 /* Convert a packet to a pktid, and save packet params in locker */
1508 #define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dir, dmah, secdma, pkttype) \
1509 dhd_pktid_map_alloc((dhd), (map), (void *)(pkt), (pa), (uint32)(len), \
1510 (uint8)(dir), (void *)(dmah), (void *)(secdma), \
1511 (dhd_pkttype_t)(pkttype))
1512
1513 /* Convert pktid to a packet, and free the locker */
1514 #define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
1515 dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
1516 (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
1517 (void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_FREE_LOCKER)
1518
1519 /* Convert the pktid to a packet, empty locker, but keep it reserved */
1520 #define DHD_PKTID_TO_NATIVE_RSV(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
1521 dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
1522 (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
1523 (void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_RSV_LOCKER)
1524
1525 #define DHD_PKTID_AVAIL(map) dhd_pktid_map_avail_cnt(map)
1526
1527 #if defined(DHD_PKTID_AUDIT_ENABLED)
1528
1529 static int
1530 dhd_get_pktid_map_type(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map)
1531 {
1532 dhd_prot_t *prot = dhd->prot;
1533 int pktid_map_type;
1534
1535 if (pktid_map == prot->pktid_ctrl_map) {
1536 pktid_map_type = DHD_PKTID_MAP_TYPE_CTRL;
1537 } else if (pktid_map == prot->pktid_tx_map) {
1538 pktid_map_type = DHD_PKTID_MAP_TYPE_TX;
1539 } else if (pktid_map == prot->pktid_rx_map) {
1540 pktid_map_type = DHD_PKTID_MAP_TYPE_RX;
1541 } else {
1542 pktid_map_type = DHD_PKTID_MAP_TYPE_UNKNOWN;
1543 }
1544
1545 return pktid_map_type;
1546 }
1547
1548 /**
1549 * __dhd_pktid_audit - Use the mwbmap to audit validity of a pktid.
1550 */
1551 static int
1552 __dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
1553 const int test_for, const char *errmsg)
1554 {
1555 #define DHD_PKT_AUDIT_STR "ERROR: %16s Host PktId Audit: "
1556 struct bcm_mwbmap *handle;
1557 uint32 flags;
1558 bool ignore_audit;
1559 int error = BCME_OK;
1560
1561 if (pktid_map == (dhd_pktid_map_t *)NULL) {
1562 DHD_ERROR((DHD_PKT_AUDIT_STR "Pkt id map NULL\n", errmsg));
1563 return BCME_OK;
1564 }
1565
1566 flags = DHD_PKTID_AUDIT_LOCK(pktid_map->pktid_audit_lock);
1567
1568 handle = pktid_map->pktid_audit;
1569 if (handle == (struct bcm_mwbmap *)NULL) {
1570 DHD_ERROR((DHD_PKT_AUDIT_STR "Handle NULL\n", errmsg));
1571 goto out;
1572 }
1573
1574 /* Exclude special pktids from audit */
1575 ignore_audit = (pktid == DHD_IOCTL_REQ_PKTID) | (pktid == DHD_FAKE_PKTID);
1576 if (ignore_audit) {
1577 goto out;
1578 }
1579
1580 if ((pktid == DHD_PKTID_INVALID) || (pktid > pktid_map->items)) {
1581 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> invalid\n", errmsg, pktid));
1582 error = BCME_ERROR;
1583 goto out;
1584 }
1585
1586 /* Perform audit */
1587 switch (test_for) {
1588 case DHD_DUPLICATE_ALLOC:
1589 if (!bcm_mwbmap_isfree(handle, pktid)) {
1590 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> alloc duplicate\n",
1591 errmsg, pktid));
1592 error = BCME_ERROR;
1593 } else {
1594 bcm_mwbmap_force(handle, pktid);
1595 }
1596 break;
1597
1598 case DHD_DUPLICATE_FREE:
1599 if (bcm_mwbmap_isfree(handle, pktid)) {
1600 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> free duplicate\n",
1601 errmsg, pktid));
1602 error = BCME_ERROR;
1603 } else {
1604 bcm_mwbmap_free(handle, pktid);
1605 }
1606 break;
1607
1608 case DHD_TEST_IS_ALLOC:
1609 if (bcm_mwbmap_isfree(handle, pktid)) {
1610 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not allocated\n",
1611 errmsg, pktid));
1612 error = BCME_ERROR;
1613 }
1614 break;
1615
1616 case DHD_TEST_IS_FREE:
1617 if (!bcm_mwbmap_isfree(handle, pktid)) {
1618 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not free\n",
1619 errmsg, pktid));
1620 error = BCME_ERROR;
1621 }
1622 break;
1623
1624 default:
1625 DHD_ERROR(("%s: Invalid test case: %d\n", __FUNCTION__, test_for));
1626 error = BCME_ERROR;
1627 break;
1628 }
1629
1630 out:
1631 DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
1632 return error;
1633 }
1634
1635 static int
1636 dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
1637 const int test_for, const char *errmsg)
1638 {
1639 int ret = BCME_OK;
1640 ret = __dhd_pktid_audit(dhd, pktid_map, pktid, test_for, errmsg);
1641 if (ret == BCME_ERROR) {
1642 DHD_ERROR(("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n",
1643 __FUNCTION__, pktid, dhd_get_pktid_map_type(dhd, pktid_map)));
1644 dhd_pktid_error_handler(dhd);
1645 }
1646
1647 return ret;
1648 }
1649
1650 #define DHD_PKTID_AUDIT(dhdp, map, pktid, test_for) \
1651 dhd_pktid_audit((dhdp), (dhd_pktid_map_t *)(map), (pktid), (test_for), __FUNCTION__)
1652
1653 static int
1654 dhd_pktid_audit_ring_debug(dhd_pub_t *dhdp, dhd_pktid_map_t *map, uint32 pktid,
1655 const int test_for, void *msg, uint32 msg_len, const char *func)
1656 {
1657 int ret = BCME_OK;
1658 ret = __dhd_pktid_audit(dhdp, map, pktid, test_for, func);
1659 if (ret == BCME_ERROR) {
1660 DHD_ERROR(("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n",
1661 __FUNCTION__, pktid, dhd_get_pktid_map_type(dhdp, map)));
1662 prhex(func, (uchar *)msg, msg_len);
1663 dhd_pktid_error_handler(dhdp);
1664 }
1665 return ret;
1666 }
1667 #define DHD_PKTID_AUDIT_RING_DEBUG(dhdp, map, pktid, test_for, msg, msg_len) \
1668 dhd_pktid_audit_ring_debug((dhdp), (dhd_pktid_map_t *)(map), \
1669 (pktid), (test_for), msg, msg_len, __FUNCTION__)
1670
1671 #endif /* DHD_PKTID_AUDIT_ENABLED */
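
/*
 * Typical audit call pattern (sketch, mirroring how the audit macros are used
 * later in this file): audit for a duplicate alloc right after a pktid is
 * produced and for a duplicate free right before it is released, e.g.
 *
 *   #ifdef DHD_PKTID_AUDIT_RING
 *   DHD_PKTID_AUDIT_RING_DEBUG(dhd, map, pktid, DHD_DUPLICATE_ALLOC, msg, msglen);
 *   ...
 *   DHD_PKTID_AUDIT_RING_DEBUG(dhd, map, pktid, DHD_DUPLICATE_FREE, msg, msglen);
 *   #endif
 *
 * DHD_TEST_IS_ALLOC / DHD_TEST_IS_FREE only verify the current state of a
 * pktid without changing the audit bitmap.
 */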
1672
1673 /**
1674 * +---------------------------------------------------------------------------+
1675 * Packet to Packet Id mapper using a <numbered_key, locker> paradigm.
1676 *
1677 * dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_xxx_PKTID].
1678 *
1679 * dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique
1680 * packet id is returned. This unique packet id may be used to retrieve the
1681 * previously saved packet metadata, using dhd_pktid_map_free(). On invocation
1682 * of dhd_pktid_map_free(), the unique packet id is essentially freed. A
1683 * subsequent call to dhd_pktid_map_alloc() may reuse this packet id.
1684 *
1685 * Implementation Note:
1686 * Convert this into a <key,locker> abstraction and place into bcmutils !
1687 * Locker abstraction should treat contents as opaque storage, and a
1688 * callback should be registered to handle busy lockers on destructor.
1689 *
1690 * +---------------------------------------------------------------------------+
1691 */
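
/*
 * Illustrative usage sketch (not taken verbatim from any caller in this file;
 * pa, len, dmah and secdma are assumed to come from a prior DMA map, and
 * DMA_TX stands for the Tx map direction):
 *
 *   uint32 pktid;
 *
 *   pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_tx_map, pkt, pa, len,
 *           DMA_TX, dmah, secdma, PKTTYPE_DATA_TX);
 *   if (pktid == DHD_PKTID_INVALID) {
 *           (pool depleted: caller must back off and retry later)
 *   }
 *
 *   ... workitem carrying pktid is posted to the dongle and later completed ...
 *
 *   pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid, pa, len,
 *           dmah, secdma, PKTTYPE_DATA_TX);
 */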
1692
1693 /** Allocate and initialize a mapper of num_items <numbered_key, locker> */
1694
1695 static dhd_pktid_map_handle_t *
1696 dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items)
1697 {
1698 void* osh;
1699 uint32 nkey;
1700 dhd_pktid_map_t *map;
1701 uint32 dhd_pktid_map_sz;
1702 uint32 map_items;
1703 uint32 map_keys_sz;
1704 osh = dhd->osh;
1705
1706 dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(num_items);
1707
1708 map = (dhd_pktid_map_t *)VMALLOC(osh, dhd_pktid_map_sz);
1709 if (map == NULL) {
1710 DHD_ERROR(("%s:%d: MALLOC failed for size %d\n",
1711 __FUNCTION__, __LINE__, dhd_pktid_map_sz));
1712 return (dhd_pktid_map_handle_t *)NULL;
1713 }
1714
1715 map->items = num_items;
1716 map->avail = num_items;
1717
1718 map_items = DHD_PKIDMAP_ITEMS(map->items);
1719
1720 map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
1721
1722 /* Initialize the lock that protects this structure */
1723 map->pktid_lock = DHD_PKTID_LOCK_INIT(osh);
1724 if (map->pktid_lock == NULL) {
1725 DHD_ERROR(("%s:%d: Lock init failed \r\n", __FUNCTION__, __LINE__));
1726 goto error;
1727 }
1728
1729 map->keys = (dhd_pktid_key_t *)MALLOC(osh, map_keys_sz);
1730 if (map->keys == NULL) {
1731 DHD_ERROR(("%s:%d: MALLOC failed for map->keys size %d\n",
1732 __FUNCTION__, __LINE__, map_keys_sz));
1733 goto error;
1734 }
1735
1736 #if defined(DHD_PKTID_AUDIT_ENABLED)
1737 /* Incarnate a hierarchical multiword bitmap for auditing pktid allocator */
1738 map->pktid_audit = bcm_mwbmap_init(osh, map_items + 1);
1739 if (map->pktid_audit == (struct bcm_mwbmap *)NULL) {
1740 DHD_ERROR(("%s:%d: pktid_audit init failed\r\n", __FUNCTION__, __LINE__));
1741 goto error;
1742 } else {
1743 DHD_ERROR(("%s:%d: pktid_audit init succeeded %d\n",
1744 __FUNCTION__, __LINE__, map_items + 1));
1745 }
1746 map->pktid_audit_lock = DHD_PKTID_AUDIT_LOCK_INIT(osh);
1747 #endif /* DHD_PKTID_AUDIT_ENABLED */
1748
1749 for (nkey = 1; nkey <= map_items; nkey++) { /* locker #0 is reserved */
1750 map->keys[nkey] = nkey; /* populate with unique keys */
1751 map->lockers[nkey].state = LOCKER_IS_FREE;
1752 map->lockers[nkey].pkt = NULL; /* bzero: redundant */
1753 map->lockers[nkey].len = 0;
1754 }
1755
1756 /* Reserve pktid #0, i.e. DHD_PKTID_INVALID, as in use */
1757 map->lockers[DHD_PKTID_INVALID].state = LOCKER_IS_BUSY; /* tag locker #0 as inuse */
1758 map->lockers[DHD_PKTID_INVALID].pkt = NULL; /* bzero: redundant */
1759 map->lockers[DHD_PKTID_INVALID].len = 0;
1760
1761 #if defined(DHD_PKTID_AUDIT_ENABLED)
1762 /* do not use dhd_pktid_audit() here, use bcm_mwbmap_force directly */
1763 bcm_mwbmap_force(map->pktid_audit, DHD_PKTID_INVALID);
1764 #endif /* DHD_PKTID_AUDIT_ENABLED */
1765
1766 return (dhd_pktid_map_handle_t *)map; /* opaque handle */
1767
1768 error:
1769 if (map) {
1770 #if defined(DHD_PKTID_AUDIT_ENABLED)
1771 if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
1772 bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
1773 map->pktid_audit = (struct bcm_mwbmap *)NULL;
1774 if (map->pktid_audit_lock)
1775 DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
1776 }
1777 #endif /* DHD_PKTID_AUDIT_ENABLED */
1778
1779 if (map->keys) {
1780 MFREE(osh, map->keys, map_keys_sz);
1781 }
1782
1783 if (map->pktid_lock) {
1784 DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
1785 }
1786
1787 VMFREE(osh, map, dhd_pktid_map_sz);
1788 }
1789 return (dhd_pktid_map_handle_t *)NULL;
1790 }
1791
1792 /**
1793 * Retrieve all allocated keys and free all <numbered_key, locker>.
1794 * Freeing implies: unmapping the buffers and freeing the native packet.
1795 * This could have been a callback registered with the pktid mapper.
1796 */
1797 static void
1798 dhd_pktid_map_reset(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
1799 {
1800 void *osh;
1801 uint32 nkey;
1802 dhd_pktid_map_t *map;
1803 dhd_pktid_item_t *locker;
1804 uint32 map_items;
1805 unsigned long flags;
1806 bool data_tx = FALSE;
1807
1808 map = (dhd_pktid_map_t *)handle;
1809 DHD_PKTID_LOCK(map->pktid_lock, flags);
1810 osh = dhd->osh;
1811
1812 map_items = DHD_PKIDMAP_ITEMS(map->items);
1813 /* skip reserved KEY #0, and start from 1 */
1814
1815 for (nkey = 1; nkey <= map_items; nkey++) {
1816 if (map->lockers[nkey].state == LOCKER_IS_BUSY) {
1817 locker = &map->lockers[nkey];
1818 locker->state = LOCKER_IS_FREE;
1819 data_tx = (locker->pkttype == PKTTYPE_DATA_TX);
1820 if (data_tx) {
1821 OSL_ATOMIC_DEC(dhd->osh, &dhd->prot->active_tx_count);
1822 }
1823
1824 #ifdef DHD_PKTID_AUDIT_RING
1825 DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
1826 #endif /* DHD_PKTID_AUDIT_RING */
1827 #ifdef DHD_MAP_PKTID_LOGGING
1828 DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap,
1829 locker->pa, nkey, locker->len,
1830 locker->pkttype);
1831 #endif /* DHD_MAP_PKTID_LOGGING */
1832
1833 {
1834 if (SECURE_DMA_ENAB(dhd->osh))
1835 SECURE_DMA_UNMAP(osh, locker->pa,
1836 locker->len, locker->dir, 0,
1837 locker->dmah, locker->secdma, 0);
1838 else
1839 DMA_UNMAP(osh, locker->pa, locker->len,
1840 locker->dir, 0, locker->dmah);
1841 }
1842 dhd_prot_packet_free(dhd, (ulong*)locker->pkt,
1843 locker->pkttype, data_tx);
1844 }
1845 else {
1846 #ifdef DHD_PKTID_AUDIT_RING
1847 DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
1848 #endif /* DHD_PKTID_AUDIT_RING */
1849 }
1850 map->keys[nkey] = nkey; /* populate with unique keys */
1851 }
1852
1853 map->avail = map_items;
1854 memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
1855 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
1856 }
1857
1858 #ifdef IOCTLRESP_USE_CONSTMEM
1859 /** Called in detach scenario. Releasing IOCTL buffers. */
1860 static void
1861 dhd_pktid_map_reset_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
1862 {
1863 uint32 nkey;
1864 dhd_pktid_map_t *map;
1865 dhd_pktid_item_t *locker;
1866 uint32 map_items;
1867 unsigned long flags;
1868
1869 map = (dhd_pktid_map_t *)handle;
1870 DHD_PKTID_LOCK(map->pktid_lock, flags);
1871
1872 map_items = DHD_PKIDMAP_ITEMS(map->items);
1873 /* skip reserved KEY #0, and start from 1 */
1874 for (nkey = 1; nkey <= map_items; nkey++) {
1875 if (map->lockers[nkey].state == LOCKER_IS_BUSY) {
1876 dhd_dma_buf_t retbuf;
1877
1878 #ifdef DHD_PKTID_AUDIT_RING
1879 DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
1880 #endif /* DHD_PKTID_AUDIT_RING */
1881
1882 locker = &map->lockers[nkey];
1883 retbuf.va = locker->pkt;
1884 retbuf.len = locker->len;
1885 retbuf.pa = locker->pa;
1886 retbuf.dmah = locker->dmah;
1887 retbuf.secdma = locker->secdma;
1888
1889 free_ioctl_return_buffer(dhd, &retbuf);
1890 }
1891 else {
1892 #ifdef DHD_PKTID_AUDIT_RING
1893 DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
1894 #endif /* DHD_PKTID_AUDIT_RING */
1895 }
1896 map->keys[nkey] = nkey; /* populate with unique keys */
1897 }
1898
1899 map->avail = map_items;
1900 memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
1901 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
1902 }
1903 #endif /* IOCTLRESP_USE_CONSTMEM */
1904
1905 /**
1906 * Free the pktid map.
1907 */
1908 static void
1909 dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
1910 {
1911 dhd_pktid_map_t *map;
1912 uint32 dhd_pktid_map_sz;
1913 uint32 map_keys_sz;
1914
1915 if (handle == NULL)
1916 return;
1917
1918 /* Free any pending packets */
1919 dhd_pktid_map_reset(dhd, handle);
1920
1921 map = (dhd_pktid_map_t *)handle;
1922 dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
1923 map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
1924
1925 DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock);
1926
1927 #if defined(DHD_PKTID_AUDIT_ENABLED)
1928 if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
1929 bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
1930 map->pktid_audit = (struct bcm_mwbmap *)NULL;
1931 if (map->pktid_audit_lock) {
1932 DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock);
1933 }
1934 }
1935 #endif /* DHD_PKTID_AUDIT_ENABLED */
1936 MFREE(dhd->osh, map->keys, map_keys_sz);
1937 VMFREE(dhd->osh, handle, dhd_pktid_map_sz);
1938 }
1939 #ifdef IOCTLRESP_USE_CONSTMEM
1940 static void
1941 dhd_pktid_map_fini_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
1942 {
1943 dhd_pktid_map_t *map;
1944 uint32 dhd_pktid_map_sz;
1945 uint32 map_keys_sz;
1946
1947 if (handle == NULL)
1948 return;
1949
1950 /* Free any pending packets */
1951 dhd_pktid_map_reset_ioctl(dhd, handle);
1952
1953 map = (dhd_pktid_map_t *)handle;
1954 dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
1955 map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
1956
1957 DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock);
1958
1959 #if defined(DHD_PKTID_AUDIT_ENABLED)
1960 if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
1961 bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
1962 map->pktid_audit = (struct bcm_mwbmap *)NULL;
1963 if (map->pktid_audit_lock) {
1964 DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock);
1965 }
1966 }
1967 #endif /* DHD_PKTID_AUDIT_ENABLED */
1968
1969 MFREE(dhd->osh, map->keys, map_keys_sz);
1970 VMFREE(dhd->osh, handle, dhd_pktid_map_sz);
1971 }
1972 #endif /* IOCTLRESP_USE_CONSTMEM */
1973
1974 /** Get the pktid free count */
1975 static INLINE uint32 BCMFASTPATH
1976 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle)
1977 {
1978 dhd_pktid_map_t *map;
1979 uint32 avail;
1980 unsigned long flags;
1981
1982 ASSERT(handle != NULL);
1983 map = (dhd_pktid_map_t *)handle;
1984
1985 DHD_PKTID_LOCK(map->pktid_lock, flags);
1986 avail = map->avail;
1987 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
1988
1989 return avail;
1990 }
1991
1992 /**
1993 * dhd_pktid_map_reserve - reserve a unique numbered key. Reserved locker is not
1994 * yet populated. Invoke the pktid save api to populate the packet parameters
1995 * into the locker. This function is not reentrant, and is the caller's
1996 * responsibility. Caller must treat a returned value DHD_PKTID_INVALID as
1997 * a failure case, implying a depleted pool of pktids.
1998 */
1999 static INLINE uint32
2000 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
2001 void *pkt, dhd_pkttype_t pkttype)
2002 {
2003 uint32 nkey;
2004 dhd_pktid_map_t *map;
2005 dhd_pktid_item_t *locker;
2006 unsigned long flags;
2007
2008 ASSERT(handle != NULL);
2009 map = (dhd_pktid_map_t *)handle;
2010
2011 DHD_PKTID_LOCK(map->pktid_lock, flags);
2012
2013 if ((int)(map->avail) <= 0) { /* no more pktids to allocate */
2014 map->failures++;
2015 DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__));
2016 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2017 return DHD_PKTID_INVALID; /* failed alloc request */
2018 }
2019
2020 ASSERT(map->avail <= map->items);
2021 nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */
2022
2023 if ((map->avail > map->items) || (nkey > map->items)) {
2024 map->failures++;
2025 DHD_ERROR(("%s:%d: failed to allocate a new pktid,"
2026 " map->avail<%u>, nkey<%u>, pkttype<%u>\n",
2027 __FUNCTION__, __LINE__, map->avail, nkey,
2028 pkttype));
2029 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2030 return DHD_PKTID_INVALID; /* failed alloc request */
2031 }
2032
2033 locker = &map->lockers[nkey]; /* save packet metadata in locker */
2034 map->avail--;
2035 locker->pkt = pkt; /* pkt is saved, other params not yet saved. */
2036 locker->len = 0;
2037 locker->state = LOCKER_IS_BUSY; /* reserve this locker */
2038
2039 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2040
2041 ASSERT(nkey != DHD_PKTID_INVALID);
2042
2043 return nkey; /* return locker's numbered key */
2044 }
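
/*
 * Two-step reserve/save sketch (hypothetical caller; the single-step
 * dhd_pktid_map_alloc() below simply combines these two calls). The split
 * allows the caller to, e.g., DMA-map the buffer only after a pktid is
 * guaranteed (DMA_RX here stands for the Rx map direction):
 *
 *   nkey = DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, PKTTYPE_DATA_RX);
 *   if (nkey == DHD_PKTID_INVALID) {
 *           (handle depleted pool)
 *   }
 *   ... DMA-map the buffer, producing pa, dmah ...
 *   DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len,
 *           DMA_RX, dmah, secdma, PKTTYPE_DATA_RX);
 */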
2045
2046 /*
2047 * dhd_pktid_map_save - Save a packet's parameters into a locker
2048 * corresponding to a previously reserved unique numbered key.
2049 */
2050 static INLINE void
2051 dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
2052 uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
2053 dhd_pkttype_t pkttype)
2054 {
2055 dhd_pktid_map_t *map;
2056 dhd_pktid_item_t *locker;
2057 unsigned long flags;
2058
2059 ASSERT(handle != NULL);
2060 map = (dhd_pktid_map_t *)handle;
2061
2062 DHD_PKTID_LOCK(map->pktid_lock, flags);
2063
2064 if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
2065 DHD_ERROR(("%s:%d: Error! saving invalid pktid<%u> pkttype<%u>\n",
2066 __FUNCTION__, __LINE__, nkey, pkttype));
2067 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2068 #ifdef DHD_FW_COREDUMP
2069 if (dhd->memdump_enabled) {
2070 /* collect core dump */
2071 dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2072 dhd_bus_mem_dump(dhd);
2073 }
2074 #else
2075 ASSERT(0);
2076 #endif /* DHD_FW_COREDUMP */
2077 return;
2078 }
2079
2080 locker = &map->lockers[nkey];
2081
2082 ASSERT(((locker->state == LOCKER_IS_BUSY) && (locker->pkt == pkt)) ||
2083 ((locker->state == LOCKER_IS_RSVD) && (locker->pkt == NULL)));
2084
2085 /* store contents in locker */
2086 locker->dir = dir;
2087 locker->pa = pa;
2088 locker->len = (uint16)len; /* 16bit len */
2089 locker->dmah = dmah; /* dma map handle */
2090 locker->secdma = secdma;
2091 locker->pkttype = pkttype;
2092 locker->pkt = pkt;
2093 locker->state = LOCKER_IS_BUSY; /* make this locker busy */
2094 #ifdef DHD_MAP_PKTID_LOGGING
2095 DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_map, pa, nkey, len, pkttype);
2096 #endif /* DHD_MAP_PKTID_LOGGING */
2097 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2098 }
2099
2100 /**
2101 * dhd_pktid_map_alloc - Allocate a unique numbered key and save the packet
2102 * contents into the corresponding locker. Return the numbered key.
2103 */
2104 static uint32 BCMFASTPATH
2105 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
2106 dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
2107 dhd_pkttype_t pkttype)
2108 {
2109 uint32 nkey;
2110
2111 nkey = dhd_pktid_map_reserve(dhd, handle, pkt, pkttype);
2112 if (nkey != DHD_PKTID_INVALID) {
2113 dhd_pktid_map_save(dhd, handle, pkt, nkey, pa,
2114 len, dir, dmah, secdma, pkttype);
2115 }
2116
2117 return nkey;
2118 }
2119
2120 /**
2121 * dhd_pktid_map_free - Given a numbered key, return the locker contents.
2122 * dhd_pktid_map_free() is not reentrant, and is the caller's responsibility.
2123 * Caller may not free a pktid value DHD_PKTID_INVALID or an arbitrary pktid
2124 * value. Only a previously allocated pktid may be freed.
2125 */
2126 static void * BCMFASTPATH
2127 dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, uint32 nkey,
2128 dmaaddr_t *pa, uint32 *len, void **dmah, void **secdma, dhd_pkttype_t pkttype,
2129 bool rsv_locker)
2130 {
2131 dhd_pktid_map_t *map;
2132 dhd_pktid_item_t *locker;
2133 void * pkt;
2134 unsigned long long locker_addr;
2135 unsigned long flags;
2136
2137 ASSERT(handle != NULL);
2138
2139 map = (dhd_pktid_map_t *)handle;
2140
2141 DHD_PKTID_LOCK(map->pktid_lock, flags);
2142
2143 if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
2144 DHD_ERROR(("%s:%d: Error! Try to free invalid pktid<%u>, pkttype<%d>\n",
2145 __FUNCTION__, __LINE__, nkey, pkttype));
2146 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2147 #ifdef DHD_FW_COREDUMP
2148 if (dhd->memdump_enabled) {
2149 /* collect core dump */
2150 dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2151 dhd_bus_mem_dump(dhd);
2152 }
2153 #else
2154 ASSERT(0);
2155 #endif /* DHD_FW_COREDUMP */
2156 return NULL;
2157 }
2158
2159 locker = &map->lockers[nkey];
2160
2161 #if defined(DHD_PKTID_AUDIT_MAP)
2162 DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* Audit duplicate FREE */
2163 #endif /* DHD_PKTID_AUDIT_MAP */
2164
2165 /* Debug check for cloned numbered key */
2166 if (locker->state == LOCKER_IS_FREE) {
2167 DHD_ERROR(("%s:%d: Error! freeing already freed invalid pktid<%u>\n",
2168 __FUNCTION__, __LINE__, nkey));
2169 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2170 #ifdef DHD_FW_COREDUMP
2171 if (dhd->memdump_enabled) {
2172 /* collect core dump */
2173 dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2174 dhd_bus_mem_dump(dhd);
2175 }
2176 #else
2177 ASSERT(0);
2178 #endif /* DHD_FW_COREDUMP */
2179 return NULL;
2180 }
2181
2182 /* Check the colour of the buffer, i.e. a buffer posted for TX
2183 * should be freed on TX completion. Similarly, a buffer posted for
2184 * IOCTL should be freed on IOCTL completion, etc.
2185 */
2186 if ((pkttype != PKTTYPE_NO_CHECK) && (locker->pkttype != pkttype)) {
2187
2188 DHD_ERROR(("%s:%d: Error! Invalid Buffer Free for pktid<%u> \n",
2189 __FUNCTION__, __LINE__, nkey));
2190 #ifdef BCMDMA64OSL
2191 PHYSADDRTOULONG(locker->pa, locker_addr);
2192 #else
2193 locker_addr = PHYSADDRLO(locker->pa);
2194 #endif /* BCMDMA64OSL */
2195 DHD_ERROR(("%s:%d: locker->state <%d>, locker->pkttype <%d>,"
2196 "pkttype <%d> locker->pa <0x%llx> \n",
2197 __FUNCTION__, __LINE__, locker->state, locker->pkttype,
2198 pkttype, locker_addr));
2199 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2200 #ifdef DHD_FW_COREDUMP
2201 if (dhd->memdump_enabled) {
2202 /* collect core dump */
2203 dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
2204 dhd_bus_mem_dump(dhd);
2205 }
2206 #else
2207 ASSERT(0);
2208 #endif /* DHD_FW_COREDUMP */
2209 return NULL;
2210 }
2211
2212 if (rsv_locker == DHD_PKTID_FREE_LOCKER) {
2213 map->avail++;
2214 map->keys[map->avail] = nkey; /* make this numbered key available */
2215 locker->state = LOCKER_IS_FREE; /* open and free Locker */
2216 } else {
2217 /* pktid will be reused, but the locker does not have a valid pkt */
2218 locker->state = LOCKER_IS_RSVD;
2219 }
2220
2221 #if defined(DHD_PKTID_AUDIT_MAP)
2222 DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
2223 #endif /* DHD_PKTID_AUDIT_MAP */
2224 #ifdef DHD_MAP_PKTID_LOGGING
2225 DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap, locker->pa, nkey,
2226 (uint32)locker->len, pkttype);
2227 #endif /* DHD_MAP_PKTID_LOGGING */
2228
2229 *pa = locker->pa; /* return contents of locker */
2230 *len = (uint32)locker->len;
2231 *dmah = locker->dmah;
2232 *secdma = locker->secdma;
2233
2234 pkt = locker->pkt;
2235 locker->pkt = NULL; /* Clear pkt */
2236 locker->len = 0;
2237
2238 DHD_PKTID_UNLOCK(map->pktid_lock, flags);
2239
2240 return pkt;
2241 }
2242
2243 #else /* ! DHD_PCIE_PKTID */
2244
2245 typedef struct pktlist {
2246 PKT_LIST *tx_pkt_list; /* list for tx packets */
2247 PKT_LIST *rx_pkt_list; /* list for rx packets */
2248 PKT_LIST *ctrl_pkt_list; /* list for ioctl/event buf post */
2249 } pktlists_t;
2250
2251 /*
2252 * Given that each workitem only uses a 32bit pktid, only 32bit hosts may avail
2253 * of a one-to-one mapping between a 32bit pktptr and a 32bit pktid.
2254 *
2255 * - When PKTIDMAP is not used, DHD_NATIVE_TO_PKTID variants will never fail.
2256 * - Neither DHD_NATIVE_TO_PKTID nor DHD_PKTID_TO_NATIVE need to be protected by
2257 * a lock.
2258 * - Hence DHD_PKTID_INVALID is not defined when DHD_PCIE_PKTID is undefined.
2259 */
2260 #define DHD_PKTID32(pktptr32) ((uint32)(pktptr32))
2261 #define DHD_PKTPTR32(pktid32) ((void *)(pktid32))
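
/*
 * In this non-PKTID build the conversion is a plain cast in both directions
 * (sketch, assuming a 32bit host pointer pkt):
 *
 *   uint32 pktid = DHD_PKTID32(pkt);   (the pktid is the pointer value itself)
 *   void *ptr = DHD_PKTPTR32(pktid);   (ptr == pkt)
 */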
2262
2263 static INLINE uint32 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
2264 dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
2265 dhd_pkttype_t pkttype);
2266 static INLINE void * dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
2267 dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
2268 dhd_pkttype_t pkttype);
2269
2270 static dhd_pktid_map_handle_t *
2271 dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items)
2272 {
2273 osl_t *osh = dhd->osh;
2274 pktlists_t *handle = NULL;
2275
2276 if ((handle = (pktlists_t *) MALLOCZ(osh, sizeof(pktlists_t))) == NULL) {
2277 DHD_ERROR(("%s:%d: MALLOC failed for lists allocation, size=%d\n",
2278 __FUNCTION__, __LINE__, sizeof(pktlists_t)));
2279 goto error_done;
2280 }
2281
2282 if ((handle->tx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
2283 DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
2284 __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
2285 goto error;
2286 }
2287
2288 if ((handle->rx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
2289 DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
2290 __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
2291 goto error;
2292 }
2293
2294 if ((handle->ctrl_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
2295 DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
2296 __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
2297 goto error;
2298 }
2299
2300 PKTLIST_INIT(handle->tx_pkt_list);
2301 PKTLIST_INIT(handle->rx_pkt_list);
2302 PKTLIST_INIT(handle->ctrl_pkt_list);
2303
2304 return (dhd_pktid_map_handle_t *) handle;
2305
2306 error:
2307 if (handle->ctrl_pkt_list) {
2308 MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
2309 }
2310
2311 if (handle->rx_pkt_list) {
2312 MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
2313 }
2314
2315 if (handle->tx_pkt_list) {
2316 MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
2317 }
2318
2319 if (handle) {
2320 MFREE(osh, handle, sizeof(pktlists_t));
2321 }
2322
2323 error_done:
2324 return (dhd_pktid_map_handle_t *)NULL;
2325 }
2326
2327 static void
2328 dhd_pktid_map_reset(dhd_pub_t *dhd, pktlists_t *handle)
2329 {
2330 osl_t *osh = dhd->osh;
2331
2332 if (handle->ctrl_pkt_list) {
2333 PKTLIST_FINI(handle->ctrl_pkt_list);
2334 MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
2335 }
2336
2337 if (handle->rx_pkt_list) {
2338 PKTLIST_FINI(handle->rx_pkt_list);
2339 MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
2340 }
2341
2342 if (handle->tx_pkt_list) {
2343 PKTLIST_FINI(handle->tx_pkt_list);
2344 MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
2345 }
2346 }
2347
2348 static void
2349 dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map)
2350 {
2351 osl_t *osh = dhd->osh;
2352 pktlists_t *handle = (pktlists_t *) map;
2353
2354 ASSERT(handle != NULL);
2355 if (handle == (pktlists_t *)NULL) {
2356 return;
2357 }
2358
2359 dhd_pktid_map_reset(dhd, handle);
2360
2361 if (handle) {
2362 MFREE(osh, handle, sizeof(pktlists_t));
2363 }
2364 }
2365
2366 /** Save dma parameters into the packet's pkttag and convert a pktptr to pktid */
2367 static INLINE uint32
2368 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
2369 dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
2370 dhd_pkttype_t pkttype)
2371 {
2372 pktlists_t *handle = (pktlists_t *) map;
2373 ASSERT(pktptr32 != NULL);
2374 DHD_PKT_SET_DMA_LEN(pktptr32, dma_len);
2375 DHD_PKT_SET_DMAH(pktptr32, dmah);
2376 DHD_PKT_SET_PA(pktptr32, pa);
2377 DHD_PKT_SET_SECDMA(pktptr32, secdma);
2378
2379 if (pkttype == PKTTYPE_DATA_TX) {
2380 PKTLIST_ENQ(handle->tx_pkt_list, pktptr32);
2381 } else if (pkttype == PKTTYPE_DATA_RX) {
2382 PKTLIST_ENQ(handle->rx_pkt_list, pktptr32);
2383 } else {
2384 PKTLIST_ENQ(handle->ctrl_pkt_list, pktptr32);
2385 }
2386
2387 return DHD_PKTID32(pktptr32);
2388 }
2389
2390 /** Convert a pktid to pktptr and retrieve saved dma parameters from packet */
2391 static INLINE void *
2392 dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
2393 dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
2394 dhd_pkttype_t pkttype)
2395 {
2396 pktlists_t *handle = (pktlists_t *) map;
2397 void *pktptr32;
2398
2399 ASSERT(pktid32 != 0U);
2400 pktptr32 = DHD_PKTPTR32(pktid32);
2401 *dma_len = DHD_PKT_GET_DMA_LEN(pktptr32);
2402 *dmah = DHD_PKT_GET_DMAH(pktptr32);
2403 *pa = DHD_PKT_GET_PA(pktptr32);
2404 *secdma = DHD_PKT_GET_SECDMA(pktptr32);
2405
2406 if (pkttype == PKTTYPE_DATA_TX) {
2407 PKTLIST_UNLINK(handle->tx_pkt_list, pktptr32);
2408 } else if (pkttype == PKTTYPE_DATA_RX) {
2409 PKTLIST_UNLINK(handle->rx_pkt_list, pktptr32);
2410 } else {
2411 PKTLIST_UNLINK(handle->ctrl_pkt_list, pktptr32);
2412 }
2413
2414 return pktptr32;
2415 }
2416
2417 #define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype) DHD_PKTID32(pkt)
2418
2419 #define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dma_dir, dmah, secdma, pkttype) \
2420 ({ BCM_REFERENCE(dhd); BCM_REFERENCE(nkey); BCM_REFERENCE(dma_dir); \
2421 dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
2422 (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
2423 })
2424
2425 #define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dma_dir, dmah, secdma, pkttype) \
2426 ({ BCM_REFERENCE(dhd); BCM_REFERENCE(dma_dir); \
2427 dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
2428 (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
2429 })
2430
2431 #define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
2432 ({ BCM_REFERENCE(dhd); BCM_REFERENCE(pkttype); \
2433 dhd_pktid_to_native((dhd_pktid_map_handle_t *) map, (uint32)(pktid), \
2434 (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
2435 (void **)&secdma, (dhd_pkttype_t)(pkttype)); \
2436 })
2437
2438 #define DHD_PKTID_AVAIL(map) (~0)
2439
2440 #endif /* ! DHD_PCIE_PKTID */
2441
2442 /* +------------------ End of PCIE DHD PKTID MAPPER -----------------------+ */
2443
2444 /**
2445 * The PCIE FD protocol layer is constructed in two phases:
2446 * Phase 1. dhd_prot_attach()
2447 * Phase 2. dhd_prot_init()
2448 *
2449 * dhd_prot_attach() - Allocates a dhd_prot_t object and resets all its fields.
2450 * All common rings are also attached here (msgbuf_ring_t objects are allocated
2451 * along with their DMA-able buffers).
2452 * All dhd_dma_buf_t objects are also allocated here.
2453 *
2454 * As dhd_prot_attach is invoked before the pcie_shared object is read, any
2455 * initialization of objects that requires information advertized by the dongle
2456 * may not be performed here.
2457 * E.g. the number of TxPost flowrings is not known at this point, nor do
2458 * we know which form of D2H DMA sync mechanism is advertized by the dongle, or
2459 * whether the dongle supports DMA-ing of WR/RD indices for the H2D and/or D2H
2460 * rings (common + flow).
2461 *
2462 * dhd_prot_init() is invoked after the bus layer has fetched the information
2463 * advertized by the dongle in the pcie_shared_t.
2464 */
2465 int
2466 dhd_prot_attach(dhd_pub_t *dhd)
2467 {
2468 osl_t *osh = dhd->osh;
2469 dhd_prot_t *prot;
2470
2471 /* FW is going to DMA extended trap data;
2472 * allocate a buffer for the maximum extended trap data.
2473 */
2474 #ifdef D2H_MINIDUMP
2475 uint32 trap_buf_len = BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN_MIN;
2476 #else
2477 uint32 trap_buf_len = BCMPCIE_EXT_TRAP_DATA_MAXLEN;
2478 #endif /* D2H_MINIDUMP */
2479
2480 /* Allocate prot structure */
2481 if (!(prot = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT,
2482 sizeof(dhd_prot_t)))) {
2483 DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
2484 goto fail;
2485 }
2486 memset(prot, 0, sizeof(*prot));
2487
2488 prot->osh = osh;
2489 dhd->prot = prot;
2490
2491 /* DMA-ing of ring updates (WR/RD indices) supported? FALSE by default */
2492 dhd->dma_d2h_ring_upd_support = FALSE;
2493 dhd->dma_h2d_ring_upd_support = FALSE;
2494 dhd->dma_ring_upd_overwrite = FALSE;
2495
2496 dhd->idma_inited = 0;
2497 dhd->ifrm_inited = 0;
2498 dhd->dar_inited = 0;
2499
2500 /* Common Ring Allocations */
2501
2502 /* Ring 0: H2D Control Submission */
2503 if (dhd_prot_ring_attach(dhd, &prot->h2dring_ctrl_subn, "h2dctrl",
2504 H2DRING_CTRL_SUB_MAX_ITEM, H2DRING_CTRL_SUB_ITEMSIZE,
2505 BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT) != BCME_OK) {
2506 DHD_ERROR(("%s: dhd_prot_ring_attach H2D Ctrl Submission failed\n",
2507 __FUNCTION__));
2508 goto fail;
2509 }
2510
2511 /* Ring 1: H2D Receive Buffer Post */
2512 if (dhd_prot_ring_attach(dhd, &prot->h2dring_rxp_subn, "h2drxp",
2513 H2DRING_RXPOST_MAX_ITEM, H2DRING_RXPOST_ITEMSIZE,
2514 BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT) != BCME_OK) {
2515 DHD_ERROR(("%s: dhd_prot_ring_attach H2D RxPost failed\n",
2516 __FUNCTION__));
2517 goto fail;
2518 }
2519
2520 /* Ring 2: D2H Control Completion */
2521 if (dhd_prot_ring_attach(dhd, &prot->d2hring_ctrl_cpln, "d2hctrl",
2522 D2HRING_CTRL_CMPLT_MAX_ITEM, D2HRING_CTRL_CMPLT_ITEMSIZE,
2523 BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE) != BCME_OK) {
2524 DHD_ERROR(("%s: dhd_prot_ring_attach D2H Ctrl Completion failed\n",
2525 __FUNCTION__));
2526 goto fail;
2527 }
2528
2529 /* Ring 3: D2H Transmit Complete */
2530 if (dhd_prot_ring_attach(dhd, &prot->d2hring_tx_cpln, "d2htxcpl",
2531 D2HRING_TXCMPLT_MAX_ITEM, D2HRING_TXCMPLT_ITEMSIZE,
2532 BCMPCIE_D2H_MSGRING_TX_COMPLETE) != BCME_OK) {
2533 DHD_ERROR(("%s: dhd_prot_ring_attach D2H Tx Completion failed\n",
2534 __FUNCTION__));
2535 goto fail;
2536
2537 }
2538
2539 /* Ring 4: D2H Receive Complete */
2540 if (dhd_prot_ring_attach(dhd, &prot->d2hring_rx_cpln, "d2hrxcpl",
2541 D2HRING_RXCMPLT_MAX_ITEM, D2HRING_RXCMPLT_ITEMSIZE,
2542 BCMPCIE_D2H_MSGRING_RX_COMPLETE) != BCME_OK) {
2543 DHD_ERROR(("%s: dhd_prot_ring_attach D2H Rx Completion failed\n",
2544 __FUNCTION__));
2545 goto fail;
2546
2547 }
2548
2549 /*
2550 * Max number of flowrings is not yet known. msgbuf_ring_t with DMA-able
2551 * buffers for flowrings will be instantiated in dhd_prot_init().
2552 * See dhd_prot_flowrings_pool_attach()
2553 */
2554 /* ioctl response buffer */
2555 if (dhd_dma_buf_alloc(dhd, &prot->retbuf, IOCT_RETBUF_SIZE)) {
2556 goto fail;
2557 }
2558
2559 /* IOCTL request buffer */
2560 if (dhd_dma_buf_alloc(dhd, &prot->ioctbuf, IOCT_RETBUF_SIZE)) {
2561 goto fail;
2562 }
2563
2564 /* Host TS request buffer: one buffer for now */
2565 if (dhd_dma_buf_alloc(dhd, &prot->hostts_req_buf, CTRLSUB_HOSTTS_MEESAGE_SIZE)) {
2566 goto fail;
2567 }
2568 prot->hostts_req_buf_inuse = FALSE;
2569
2570 /* Scratch buffer for dma rx offset */
2571 #ifdef BCM_HOST_BUF
2572 if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf,
2573 ROUNDUP(DMA_D2H_SCRATCH_BUF_LEN, 16) + DMA_HOST_BUFFER_LEN)) {
2574 #else
2575 if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, DMA_D2H_SCRATCH_BUF_LEN)) {
2576
2577 #endif /* BCM_HOST_BUF */
2578
2579 goto fail;
2580 }
2581
2582 /* scratch buffer for bus throughput measurement */
2583 if (dhd_dma_buf_alloc(dhd, &prot->host_bus_throughput_buf, DHD_BUS_TPUT_BUF_LEN)) {
2584 goto fail;
2585 }
2586
2587 #ifdef DHD_RX_CHAINING
2588 dhd_rxchain_reset(&prot->rxchain);
2589 #endif // endif
2590
2591 prot->pktid_ctrl_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_CTRL_PKTID);
2592 if (prot->pktid_ctrl_map == NULL) {
2593 goto fail;
2594 }
2595
2596 prot->pktid_rx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_RX_PKTID);
2597 if (prot->pktid_rx_map == NULL)
2598 goto fail;
2599
2600 prot->pktid_tx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_TX_PKTID);
2601 if (prot->pktid_tx_map == NULL)
2602 goto fail;
2603
2604 #ifdef IOCTLRESP_USE_CONSTMEM
2605 prot->pktid_map_handle_ioctl = DHD_NATIVE_TO_PKTID_INIT(dhd,
2606 DHD_FLOWRING_MAX_IOCTLRESPBUF_POST);
2607 if (prot->pktid_map_handle_ioctl == NULL) {
2608 goto fail;
2609 }
2610 #endif /* IOCTLRESP_USE_CONSTMEM */
2611
2612 #ifdef DHD_MAP_PKTID_LOGGING
2613 prot->pktid_dma_map = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
2614 if (prot->pktid_dma_map == NULL) {
2615 DHD_ERROR(("%s: failed to allocate pktid_dma_map\n",
2616 __FUNCTION__));
2617 }
2618
2619 prot->pktid_dma_unmap = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
2620 if (prot->pktid_dma_unmap == NULL) {
2621 DHD_ERROR(("%s: failed to allocate pktid_dma_unmap\n",
2622 __FUNCTION__));
2623 }
2624 #endif /* DHD_MAP_PKTID_LOGGING */
2625
2626 /* Initialize the work queues to be used by the Load Balancing logic */
2627 #if defined(DHD_LB_TXC)
2628 {
2629 void *buffer;
2630 buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ);
2631 if (buffer == NULL) {
2632 DHD_ERROR(("%s: failed to allocate RXC work buffer\n", __FUNCTION__));
2633 goto fail;
2634 }
2635 bcm_workq_init(&prot->tx_compl_prod, &prot->tx_compl_cons,
2636 buffer, DHD_LB_WORKQ_SZ);
2637 prot->tx_compl_prod_sync = 0;
2638 DHD_INFO(("%s: created tx_compl_workq <%p,%d>\n",
2639 __FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
2640 }
2641 #endif /* DHD_LB_TXC */
2642
2643 #if defined(DHD_LB_RXC)
2644 {
2645 void *buffer;
2646 buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ);
2647 if (buffer == NULL) {
2648 DHD_ERROR(("%s: failed to allocate RXC work buffer\n", __FUNCTION__));
2649 goto fail;
2650 }
2651 bcm_workq_init(&prot->rx_compl_prod, &prot->rx_compl_cons,
2652 buffer, DHD_LB_WORKQ_SZ);
2653 prot->rx_compl_prod_sync = 0;
2654 DHD_INFO(("%s: created rx_compl_workq <%p,%d>\n",
2655 __FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
2656 }
2657 #endif /* DHD_LB_RXC */
2658
2659 /* Initialize trap buffer */
2660 if (dhd_dma_buf_alloc(dhd, &dhd->prot->fw_trap_buf, trap_buf_len)) {
2661 DHD_ERROR(("%s: dhd_init_trap_buffer falied\n", __FUNCTION__));
2662 goto fail;
2663 }
2664
2665 return BCME_OK;
2666
2667 fail:
2668
2669 if (prot) {
2670 /* Free up all allocated memories */
2671 dhd_prot_detach(dhd);
2672 }
2673
2674 return BCME_NOMEM;
2675 } /* dhd_prot_attach */
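
/*
 * Lifecycle sketch (hedged; the exact call sites live in the bus/OS layers and
 * are not reproduced here):
 *
 *   dhd_prot_attach(dhd);    phase 1: before the dongle's pcie_shared is read
 *   ... firmware download, pcie_shared_t fetched by the bus layer ...
 *   dhd_prot_init(dhd);      phase 2: apply the dongle advertized features
 *   ...
 *   dhd_prot_reset(dhd);     optional soft reboot of the dongle
 *   dhd_prot_init(dhd);      re-init without a new dhd_prot_attach()
 *   ...
 *   dhd_prot_detach(dhd);    final teardown, frees everything attach allocated
 */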
2676
2677 void
2678 dhd_set_host_cap(dhd_pub_t *dhd)
2679 {
2680 uint32 data = 0;
2681 dhd_prot_t *prot = dhd->prot;
2682 #ifdef D2H_MINIDUMP
2683 uint16 host_trap_addr_len;
2684 #endif /* D2H_MINIDUMP */
2685
2686 if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
2687 if (dhd->h2d_phase_supported) {
2688 data |= HOSTCAP_H2D_VALID_PHASE;
2689 if (dhd->force_dongletrap_on_bad_h2d_phase)
2690 data |= HOSTCAP_H2D_ENABLE_TRAP_ON_BADPHASE;
2691 }
2692 if (prot->host_ipc_version > prot->device_ipc_version)
2693 prot->active_ipc_version = prot->device_ipc_version;
2694 else
2695 prot->active_ipc_version = prot->host_ipc_version;
2696
2697 data |= prot->active_ipc_version;
2698
2699 if (dhdpcie_bus_get_pcie_hostready_supported(dhd->bus)) {
2700 DHD_INFO(("Advertise Hostready Capability\n"));
2701 data |= HOSTCAP_H2D_ENABLE_HOSTRDY;
2702 }
2703 {
2704 /* Disable DS altogether */
2705 data |= HOSTCAP_DS_NO_OOB_DW;
2706 dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_NONE);
2707 }
2708
2709 /* Indicate support for extended trap data */
2710 data |= HOSTCAP_EXTENDED_TRAP_DATA;
2711
2712 /* Indicate support for TX status metadata */
2713 if (dhd->pcie_txs_metadata_enable != 0)
2714 data |= HOSTCAP_TXSTATUS_METADATA;
2715
2716 /* Enable fast delete ring in firmware if supported */
2717 if (dhd->fast_delete_ring_support) {
2718 data |= HOSTCAP_FAST_DELETE_RING;
2719 }
2720
2721 if (dhdpcie_bus_get_pcie_idma_supported(dhd->bus)) {
2722 DHD_ERROR(("IDMA inited\n"));
2723 data |= HOSTCAP_H2D_IDMA;
2724 dhd->idma_inited = TRUE;
2725 }
2726
2727 if (dhdpcie_bus_get_pcie_ifrm_supported(dhd->bus)) {
2728 DHD_ERROR(("IFRM Inited\n"));
2729 data |= HOSTCAP_H2D_IFRM;
2730 dhd->ifrm_inited = TRUE;
2731 dhd->dma_h2d_ring_upd_support = FALSE;
2732 dhd_prot_dma_indx_free(dhd);
2733 }
2734
2735 if (dhdpcie_bus_get_pcie_dar_supported(dhd->bus)) {
2736 DHD_ERROR(("DAR doorbell Use\n"));
2737 data |= HOSTCAP_H2D_DAR;
2738 dhd->dar_inited = TRUE;
2739 }
2740
2741 data |= HOSTCAP_UR_FW_NO_TRAP;
2742
2743 #ifdef D2H_MINIDUMP
2744 if (dhd_bus_is_minidump_enabled(dhd)) {
2745 data |= HOSTCAP_EXT_TRAP_DBGBUF;
2746 DHD_ERROR(("ALLOW D2H MINIDUMP!!\n"));
2747 }
2748 #endif /* D2H_MINIDUMP */
2749 DHD_INFO(("%s:Active Ver:%d, Host Ver:%d, FW Ver:%d\n",
2750 __FUNCTION__,
2751 prot->active_ipc_version, prot->host_ipc_version,
2752 prot->device_ipc_version));
2753
2754 dhd_bus_cmn_writeshared(dhd->bus, &data, sizeof(uint32), HOST_API_VERSION, 0);
2755 dhd_bus_cmn_writeshared(dhd->bus, &prot->fw_trap_buf.pa,
2756 sizeof(prot->fw_trap_buf.pa), DNGL_TO_HOST_TRAP_ADDR, 0);
2757 #ifdef D2H_MINIDUMP
2758 if (dhd_bus_is_minidump_enabled(dhd)) {
2759 /* Dongle expects the host_trap_addr_len in terms of words */
2760 host_trap_addr_len = BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN_MIN/ 4;
2761 dhd_bus_cmn_writeshared(dhd->bus, &host_trap_addr_len,
2762 sizeof(host_trap_addr_len), DNGL_TO_HOST_TRAP_ADDR_LEN, 0);
2763 }
2764 #endif /* D2H_MINIDUMP */
2765
2766 }
2767
2768 }
2769
2770 /**
2771 * dhd_prot_init - second stage of dhd_prot_attach. Now that the dongle has
2772 * completed its initialization of the pcie_shared structure, we may fetch
2773 * the dongle advertized features and adjust the protocol layer accordingly.
2774 *
2775 * dhd_prot_init() may be invoked again after a dhd_prot_reset().
2776 */
2777 int
2778 dhd_prot_init(dhd_pub_t *dhd)
2779 {
2780 sh_addr_t base_addr;
2781 dhd_prot_t *prot = dhd->prot;
2782 int ret = 0;
2783 uint32 idmacontrol;
2784 uint32 waitcount = 0;
2785
2786 #ifdef WL_MONITOR
2787 dhd->monitor_enable = FALSE;
2788 #endif /* WL_MONITOR */
2789
2790 /**
2791 * A user-defined value can be assigned to the global variable h2d_max_txpost via:
2792 * 1. the DHD IOVAR h2d_max_txpost, before firmware download, or
2793 * 2. the module parameter h2d_max_txpost.
2794 * prot->h2d_max_txpost defaults to H2DRING_TXPOST_MAX_ITEM
2795 * if the user has not set a value by either of the above methods.
2796 */
2797 prot->h2d_max_txpost = (uint16)h2d_max_txpost;
2798
2799 DHD_ERROR(("%s:%d: h2d_max_txpost = %d\n", __FUNCTION__, __LINE__, prot->h2d_max_txpost));
2800
2801 /* Read max rx packets supported by dongle */
2802 dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0);
2803 if (prot->max_rxbufpost == 0) {
2804 /* This would happen if the dongle firmware is not */
2805 /* using the latest shared structure template */
2806 prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST;
2807 }
2808 DHD_ERROR(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost));
2809
2810 /* Initialize fields individually; a bzero() here would blow away the dma buffer pointers. */
2811 prot->max_eventbufpost = DHD_FLOWRING_MAX_EVENTBUF_POST;
2812 prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST;
2813 prot->max_infobufpost = DHD_H2D_INFORING_MAX_BUF_POST;
2814 prot->max_tsbufpost = DHD_MAX_TSBUF_POST;
2815
2816 prot->cur_ioctlresp_bufs_posted = 0;
2817 OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count);
2818 prot->data_seq_no = 0;
2819 prot->ioctl_seq_no = 0;
2820 prot->rxbufpost = 0;
2821 prot->cur_event_bufs_posted = 0;
2822 prot->ioctl_state = 0;
2823 prot->curr_ioctl_cmd = 0;
2824 prot->cur_ts_bufs_posted = 0;
2825 prot->infobufpost = 0;
2826
2827 prot->dmaxfer.srcmem.va = NULL;
2828 prot->dmaxfer.dstmem.va = NULL;
2829 prot->dmaxfer.in_progress = FALSE;
2830
2831 prot->metadata_dbg = FALSE;
2832 prot->rx_metadata_offset = 0;
2833 prot->tx_metadata_offset = 0;
2834 prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT;
2835
2836 /* To catch any rollover issues fast, starting with higher ioctl_trans_id */
2837 prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER;
2838 prot->ioctl_state = 0;
2839 prot->ioctl_status = 0;
2840 prot->ioctl_resplen = 0;
2841 prot->ioctl_received = IOCTL_WAIT;
2842
2843 /* Initialize Common MsgBuf Rings */
2844
2845 prot->device_ipc_version = dhd->bus->api.fw_rev;
2846 prot->host_ipc_version = PCIE_SHARED_VERSION;
2847
2848 /* Init the host API version */
2849 dhd_set_host_cap(dhd);
2850
2851 /* Register the interrupt function upfront */
2852 /* remove corerev checks in data path */
2853 /* do this after host/fw negotiation for DAR */
2854 prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus);
2855 prot->mb_2_ring_fn = dhd_bus_get_mbintr_2_fn(dhd->bus);
2856
2857 dhd->bus->_dar_war = (dhd->bus->sih->buscorerev < 64) ? TRUE : FALSE;
2858
2859 dhd_prot_ring_init(dhd, &prot->h2dring_ctrl_subn);
2860 dhd_prot_ring_init(dhd, &prot->h2dring_rxp_subn);
2861 dhd_prot_ring_init(dhd, &prot->d2hring_ctrl_cpln);
2862
2863 /* Make it compatible with pre-rev7 firmware */
2864 if (prot->active_ipc_version < PCIE_SHARED_VERSION_7) {
2865 prot->d2hring_tx_cpln.item_len =
2866 D2HRING_TXCMPLT_ITEMSIZE_PREREV7;
2867 prot->d2hring_rx_cpln.item_len =
2868 D2HRING_RXCMPLT_ITEMSIZE_PREREV7;
2869 }
2870 dhd_prot_ring_init(dhd, &prot->d2hring_tx_cpln);
2871 dhd_prot_ring_init(dhd, &prot->d2hring_rx_cpln);
2872
2873 dhd_prot_d2h_sync_init(dhd);
2874
2875 dhd_prot_h2d_sync_init(dhd);
2876
2877 /* init the scratch buffer */
2878 dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_scratch_buf.pa);
2879 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
2880 D2H_DMA_SCRATCH_BUF, 0);
2881 dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf.len,
2882 sizeof(prot->d2h_dma_scratch_buf.len), D2H_DMA_SCRATCH_BUF_LEN, 0);
2883
2884 /* If supported by the host, indicate the memory block
2885 * for completion writes / submission reads to shared space
2886 */
2887 if (dhd->dma_d2h_ring_upd_support) {
2888 dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_wr_buf.pa);
2889 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
2890 D2H_DMA_INDX_WR_BUF, 0);
2891 dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_rd_buf.pa);
2892 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
2893 H2D_DMA_INDX_RD_BUF, 0);
2894 }
2895
2896 if (dhd->dma_h2d_ring_upd_support || IDMA_ENAB(dhd)) {
2897 dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_wr_buf.pa);
2898 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
2899 H2D_DMA_INDX_WR_BUF, 0);
2900 dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_rd_buf.pa);
2901 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
2902 D2H_DMA_INDX_RD_BUF, 0);
2903 }
2904
2905 /* Signal to the dongle that common ring init is complete */
2906 dhd_bus_hostready(dhd->bus);
2907
2908 /*
2909 * If the DMA-able buffers for flowring needs to come from a specific
2910 * contiguous memory region, then setup prot->flowrings_dma_buf here.
2911 * dhd_prot_flowrings_pool_attach() will carve out DMA-able buffers from
2912 * this contiguous memory region, for each of the flowrings.
2913 */
2914
2915 /* Pre-allocate pool of msgbuf_ring for flowrings */
2916 if (dhd_prot_flowrings_pool_attach(dhd) != BCME_OK) {
2917 return BCME_ERROR;
2918 }
2919
2920 /* If IFRM is enabled, wait for FW to setup the DMA channel */
2921 if (IFRM_ENAB(dhd)) {
2922 dhd_base_addr_htolpa(&base_addr, prot->h2d_ifrm_indx_wr_buf.pa);
2923 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
2924 H2D_IFRM_INDX_WR_BUF, 0);
2925 }
2926
2927 /* If IDMA is enabled and inited, wait for the FW to set up the IDMA descriptors.
2928 * Wait just before configuring the doorbell.
2929 */
2930 #define IDMA_ENABLE_WAIT 10
2931 if (IDMA_ACTIVE(dhd)) {
2932 /* wait for idma_en bit in IDMAcontrol register to be set */
2933 /* Loop until idma_en is set, or until the wait count expires */
2934 uint buscorerev = dhd->bus->sih->buscorerev;
2935 idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
2936 IDMAControl(buscorerev), 0, 0);
2937 while (!(idmacontrol & PCIE_IDMA_MODE_EN(buscorerev)) &&
2938 (waitcount++ < IDMA_ENABLE_WAIT)) {
2939
2940 DHD_ERROR(("iDMA not enabled yet,waiting 1 ms c=%d IDMAControl = %08x\n",
2941 waitcount, idmacontrol));
2942 OSL_DELAY(1000); /* 1ms as its onetime only */
2943 idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
2944 IDMAControl(buscorerev), 0, 0);
2945 }
2946
2947 if (waitcount < IDMA_ENABLE_WAIT) {
2948 DHD_ERROR(("iDMA enabled PCIEControl = %08x\n", idmacontrol));
2949 } else {
2950 DHD_ERROR(("Error: wait for iDMA timed out wait=%d IDMAControl = %08x\n",
2951 waitcount, idmacontrol));
2952 return BCME_ERROR;
2953 }
2954 }
2955
2956 /* Host should configure soft doorbells here, if needed */
2957
2958 /* Post to dongle host configured soft doorbells */
2959 dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd);
2960
2961 dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
2962 dhd_msgbuf_rxbuf_post_event_bufs(dhd);
2963
2964 prot->no_retry = FALSE;
2965 prot->no_aggr = FALSE;
2966 prot->fixed_rate = FALSE;
2967
2968 /*
2969 * Note that any communication with the dongle should be added
2970 * below this point. Any other host data structure initialization that
2971 * needs to be done before the DPC starts executing should be done
2972 * before this point.
2973 * Once we start sending H2D requests to the dongle, the dongle may
2974 * respond immediately, so the DPC context handling the
2975 * D2H response could preempt the context in which dhd_prot_init is running.
2976 * We want to ensure that the host part of dhd_prot_init is
2977 * done before that.
2978 */
2979
2980 /* See if info rings could be created */
2981 if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
2982 if ((ret = dhd_prot_init_info_rings(dhd)) != BCME_OK) {
2983 /* For now log and proceed; further clean-up action may be necessary
2984 * when we have more clarity.
2985 */
2986 DHD_ERROR(("%s: Info rings couldn't be created: Err Code %d\n",
2987 __FUNCTION__, ret));
2988 }
2989 }
2990
2991 return BCME_OK;
2992 } /* dhd_prot_init */
2993
2994 /**
2995 * dhd_prot_detach - PCIE FD protocol layer destructor.
2996 * Unlink, frees allocated protocol memory (including dhd_prot)
2997 */
2998 void dhd_prot_detach(dhd_pub_t *dhd)
2999 {
3000 dhd_prot_t *prot = dhd->prot;
3001
3002 /* Stop the protocol module */
3003 if (prot) {
3004
3005 /* free up all DMA-able buffers allocated during prot attach/init */
3006
3007 dhd_dma_buf_free(dhd, &prot->d2h_dma_scratch_buf);
3008 dhd_dma_buf_free(dhd, &prot->retbuf);
3009 dhd_dma_buf_free(dhd, &prot->ioctbuf);
3010 dhd_dma_buf_free(dhd, &prot->host_bus_throughput_buf);
3011 dhd_dma_buf_free(dhd, &prot->hostts_req_buf);
3012 dhd_dma_buf_free(dhd, &prot->fw_trap_buf);
3013
3014 /* DMA-able buffers for DMAing H2D/D2H WR/RD indices */
3015 dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
3016 dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_rd_buf);
3017 dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_wr_buf);
3018 dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
3019
3020 dhd_dma_buf_free(dhd, &prot->h2d_ifrm_indx_wr_buf);
3021
3022 /* Common MsgBuf Rings */
3023 dhd_prot_ring_detach(dhd, &prot->h2dring_ctrl_subn);
3024 dhd_prot_ring_detach(dhd, &prot->h2dring_rxp_subn);
3025 dhd_prot_ring_detach(dhd, &prot->d2hring_ctrl_cpln);
3026 dhd_prot_ring_detach(dhd, &prot->d2hring_tx_cpln);
3027 dhd_prot_ring_detach(dhd, &prot->d2hring_rx_cpln);
3028
3029 /* Detach each DMA-able buffer and free the pool of msgbuf_ring_t */
3030 dhd_prot_flowrings_pool_detach(dhd);
3031
3032 /* detach info rings */
3033 dhd_prot_detach_info_rings(dhd);
3034
3035 /* If IOCTLRESP_USE_CONSTMEM is defined, IOCTL PKTs use the pktid_map_handle_ioctl
3036 * handler and PKT memory is allocated using alloc_ioctl_return_buffer(); otherwise
3037 * they are part of the pktid_ctrl_map handler and PKT memory is allocated using
3038 * PKTGET_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) or PKTGET.
3039 * Similarly, for freeing PKT buffers DHD_NATIVE_TO_PKTID_FINI is used,
3040 * which calls PKTFREE_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) or PKTFREE.
3041 * Else, if IOCTLRESP_USE_CONSTMEM is defined, IOCTL PKTs are freed using
3042 * DHD_NATIVE_TO_PKTID_FINI_IOCTL, which calls free_ioctl_return_buffer().
3043 */
3044 DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_ctrl_map);
3045 DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_rx_map);
3046 DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_tx_map);
3047 #ifdef IOCTLRESP_USE_CONSTMEM
3048 DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl);
3049 #endif // endif
3050 #ifdef DHD_MAP_PKTID_LOGGING
3051 DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_map);
3052 DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_unmap);
3053 #endif /* DHD_MAP_PKTID_LOGGING */
3054
3055 #if defined(DHD_LB_TXC)
3056 if (prot->tx_compl_prod.buffer)
3057 MFREE(dhd->osh, prot->tx_compl_prod.buffer,
3058 sizeof(void*) * DHD_LB_WORKQ_SZ);
3059 #endif /* DHD_LB_TXC */
3060 #if defined(DHD_LB_RXC)
3061 if (prot->rx_compl_prod.buffer)
3062 MFREE(dhd->osh, prot->rx_compl_prod.buffer,
3063 sizeof(void*) * DHD_LB_WORKQ_SZ);
3064 #endif /* DHD_LB_RXC */
3065
3066 DHD_OS_PREFREE(dhd, dhd->prot, sizeof(dhd_prot_t));
3067
3068 dhd->prot = NULL;
3069 }
3070 } /* dhd_prot_detach */
3071
3072 /**
3073 * dhd_prot_reset - Reset the protocol layer without freeing any objects.
3074 * This may be invoked to soft reboot the dongle, without having to
3075 * detach and attach the entire protocol layer.
3076 *
3077 * After dhd_prot_reset(), dhd_prot_init() may be invoked
3078  * without going through a dhd_prot_attach() phase.
3079 */
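/*
 * Example usage (a hedged sketch; the exact call path lives in the bus layer):
 *
 *     dhd_prot_reset(dhdp);      // clear rings, pktid maps and DMA-able bufs
 *     ...reload / reboot the dongle firmware...
 *     dhd_prot_init(dhdp);       // re-initialize without dhd_prot_attach()
 */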
3080 void
3081 dhd_prot_reset(dhd_pub_t *dhd)
3082 {
3083 struct dhd_prot *prot = dhd->prot;
3084
3085 DHD_TRACE(("%s\n", __FUNCTION__));
3086
3087 if (prot == NULL) {
3088 return;
3089 }
3090
3091 dhd_prot_flowrings_pool_reset(dhd);
3092
3093 /* Reset Common MsgBuf Rings */
3094 dhd_prot_ring_reset(dhd, &prot->h2dring_ctrl_subn);
3095 dhd_prot_ring_reset(dhd, &prot->h2dring_rxp_subn);
3096 dhd_prot_ring_reset(dhd, &prot->d2hring_ctrl_cpln);
3097 dhd_prot_ring_reset(dhd, &prot->d2hring_tx_cpln);
3098 dhd_prot_ring_reset(dhd, &prot->d2hring_rx_cpln);
3099
3100 /* Reset info rings */
3101 if (prot->h2dring_info_subn) {
3102 dhd_prot_ring_reset(dhd, prot->h2dring_info_subn);
3103 }
3104
3105 if (prot->d2hring_info_cpln) {
3106 dhd_prot_ring_reset(dhd, prot->d2hring_info_cpln);
3107 }
3108
3109 /* Reset all DMA-able buffers allocated during prot attach */
3110 dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf);
3111 dhd_dma_buf_reset(dhd, &prot->retbuf);
3112 dhd_dma_buf_reset(dhd, &prot->ioctbuf);
3113 dhd_dma_buf_reset(dhd, &prot->host_bus_throughput_buf);
3114 dhd_dma_buf_reset(dhd, &prot->hostts_req_buf);
3115 dhd_dma_buf_reset(dhd, &prot->fw_trap_buf);
3116
3117 dhd_dma_buf_reset(dhd, &prot->h2d_ifrm_indx_wr_buf);
3118
3119 /* Reset all DMA-able buffers for DMAing H2D/D2H WR/RD indices */
3120 dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_rd_buf);
3121 dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_wr_buf);
3122 dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_rd_buf);
3123 dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_wr_buf);
3124
3125 prot->rx_metadata_offset = 0;
3126 prot->tx_metadata_offset = 0;
3127
3128 prot->rxbufpost = 0;
3129 prot->cur_event_bufs_posted = 0;
3130 prot->cur_ioctlresp_bufs_posted = 0;
3131
3132 OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count);
3133 prot->data_seq_no = 0;
3134 prot->ioctl_seq_no = 0;
3135 prot->ioctl_state = 0;
3136 prot->curr_ioctl_cmd = 0;
3137 prot->ioctl_received = IOCTL_WAIT;
3138 /* To catch any rollover issues fast, starting with higher ioctl_trans_id */
3139 prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER;
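	/*
	 * Illustrative note: MAXBITVAL(NBITS(ioctl_trans_id)) is the largest value
	 * the field can hold, so for a 16-bit ioctl_trans_id the counter starts at
	 * 0xFFFF - BUFFER_BEFORE_ROLLOVER and wraps within the first few IOCTL
	 * transactions, instead of only after ~64K of them.
	 */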
3140
3141 /* dhd_flow_rings_init is called from dhd_bus_start,
3142  * so flowrings must be deleted when the bus is being stopped
3143  */
3144 if (dhd->flow_rings_inited) {
3145 dhd_flow_rings_deinit(dhd);
3146 }
3147
3148 /* Reset PKTID map */
3149 DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_ctrl_map);
3150 DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_rx_map);
3151 DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_tx_map);
3152 #ifdef IOCTLRESP_USE_CONSTMEM
3153 DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, prot->pktid_map_handle_ioctl);
3154 #endif /* IOCTLRESP_USE_CONSTMEM */
3155 #ifdef DMAMAP_STATS
3156 dhd->dma_stats.txdata = dhd->dma_stats.txdata_sz = 0;
3157 dhd->dma_stats.rxdata = dhd->dma_stats.rxdata_sz = 0;
3158 #ifndef IOCTLRESP_USE_CONSTMEM
3159 dhd->dma_stats.ioctl_rx = dhd->dma_stats.ioctl_rx_sz = 0;
3160 #endif /* IOCTLRESP_USE_CONSTMEM */
3161 dhd->dma_stats.event_rx = dhd->dma_stats.event_rx_sz = 0;
3162 dhd->dma_stats.info_rx = dhd->dma_stats.info_rx_sz = 0;
3163 dhd->dma_stats.tsbuf_rx = dhd->dma_stats.tsbuf_rx_sz = 0;
3164 #endif /* DMAMAP_STATS */
3165 } /* dhd_prot_reset */
3166
3167 #if defined(DHD_LB_RXP)
3168 #define DHD_LB_DISPATCH_RX_PROCESS(dhdp) dhd_lb_dispatch_rx_process(dhdp)
3169 #else /* !DHD_LB_RXP */
3170 #define DHD_LB_DISPATCH_RX_PROCESS(dhdp) do { /* noop */ } while (0)
3171 #endif /* !DHD_LB_RXP */
3172
3173 #if defined(DHD_LB_RXC)
3174 #define DHD_LB_DISPATCH_RX_COMPL(dhdp) dhd_lb_dispatch_rx_compl(dhdp)
3175 #else /* !DHD_LB_RXC */
3176 #define DHD_LB_DISPATCH_RX_COMPL(dhdp) do { /* noop */ } while (0)
3177 #endif /* !DHD_LB_RXC */
3178
3179 #if defined(DHD_LB_TXC)
3180 #define DHD_LB_DISPATCH_TX_COMPL(dhdp) dhd_lb_dispatch_tx_compl(dhdp)
3181 #else /* !DHD_LB_TXC */
3182 #define DHD_LB_DISPATCH_TX_COMPL(dhdp) do { /* noop */ } while (0)
3183 #endif /* !DHD_LB_TXC */
3184
3185 #if defined(DHD_LB)
3186 /* DHD load balancing: deferral of work to another online CPU */
3187 /* DHD_LB_TXC DHD_LB_RXC DHD_LB_RXP dispatchers, in dhd_linux.c */
3188 extern void dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp);
3189 extern void dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp);
3190 extern void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
3191 extern void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
3192
3193 #if defined(DHD_LB_RXP)
3194 /**
3195  * dhd_lb_dispatch_rx_process - load balance by dispatching Rx processing work
3196 * to other CPU cores
3197 */
3198 static INLINE void
3199 dhd_lb_dispatch_rx_process(dhd_pub_t *dhdp)
3200 {
3201 dhd_lb_rx_napi_dispatch(dhdp); /* dispatch rx_process_napi */
3202 }
3203 #endif /* DHD_LB_RXP */
3204
3205 #if defined(DHD_LB_TXC)
3206 /**
3207  * dhd_lb_dispatch_tx_compl - load balance by dispatching Tx completion work
3208 * to other CPU cores
3209 */
3210 static INLINE void
3211 dhd_lb_dispatch_tx_compl(dhd_pub_t *dhdp, uint16 ring_idx)
3212 {
3213 bcm_workq_prod_sync(&dhdp->prot->tx_compl_prod); /* flush WR index */
3214 dhd_lb_tx_compl_dispatch(dhdp); /* dispatch tx_compl_tasklet */
3215 }
3216
3217 /**
3218 * DHD load balanced tx completion tasklet handler, that will perform the
3219 * freeing of packets on the selected CPU. Packet pointers are delivered to
3220 * this tasklet via the tx complete workq.
3221 */
3222 void
3223 dhd_lb_tx_compl_handler(unsigned long data)
3224 {
3225 int elem_ix;
3226 void *pkt, **elem;
3227 dmaaddr_t pa;
3228 uint32 pa_len;
3229 dhd_pub_t *dhd = (dhd_pub_t *)data;
3230 dhd_prot_t *prot = dhd->prot;
3231 bcm_workq_t *workq = &prot->tx_compl_cons;
3232 uint32 count = 0;
3233
3234 int curr_cpu;
3235 curr_cpu = get_cpu();
3236 put_cpu();
3237
3238 DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhd);
3239
3240 while (1) {
3241 elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
3242
3243 if (elem_ix == BCM_RING_EMPTY) {
3244 break;
3245 }
3246
3247 elem = WORKQ_ELEMENT(void *, workq, elem_ix);
3248 pkt = *elem;
3249
3250 DHD_INFO(("%s: tx_compl_cons pkt<%p>\n", __FUNCTION__, pkt));
3251
3252 OSL_PREFETCH(PKTTAG(pkt));
3253 OSL_PREFETCH(pkt);
3254
3255 pa = DHD_PKTTAG_PA((dhd_pkttag_fr_t *)PKTTAG(pkt));
3256 pa_len = DHD_PKTTAG_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt));
3257
3258 DMA_UNMAP(dhd->osh, pa, pa_len, DMA_RX, 0, 0);
3259 #if defined(BCMPCIE)
3260 dhd_txcomplete(dhd, pkt, true);
3261 #endif // endif
3262
3263 PKTFREE(dhd->osh, pkt, TRUE);
3264 count++;
3265 }
3266
3267 /* smp_wmb(); */
3268 bcm_workq_cons_sync(workq);
3269 DHD_LB_STATS_UPDATE_TXC_HISTO(dhd, count);
3270 }
3271 #endif /* DHD_LB_TXC */
3272
3273 #if defined(DHD_LB_RXC)
3274
3275 /**
3276  * dhd_lb_dispatch_rx_compl - load balance by dispatching Rx completion work
3277 * to other CPU cores
3278 */
3279 static INLINE void
3280 dhd_lb_dispatch_rx_compl(dhd_pub_t *dhdp)
3281 {
3282 dhd_prot_t *prot = dhdp->prot;
3283 /* Schedule the tasklet only if we have to */
3284 if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
3285 /* flush WR index */
3286 bcm_workq_prod_sync(&dhdp->prot->rx_compl_prod);
3287 dhd_lb_rx_compl_dispatch(dhdp); /* dispatch rx_compl_tasklet */
3288 }
3289 }
3290
3291 void
3292 dhd_lb_rx_compl_handler(unsigned long data)
3293 {
3294 dhd_pub_t *dhd = (dhd_pub_t *)data;
3295 bcm_workq_t *workq = &dhd->prot->rx_compl_cons;
3296
3297 DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhd);
3298
3299 dhd_msgbuf_rxbuf_post(dhd, TRUE); /* re-use pktids */
3300 bcm_workq_cons_sync(workq);
3301 }
3302 #endif /* DHD_LB_RXC */
3303 #endif /* DHD_LB */
3304
3305 void
3306 dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset)
3307 {
3308 dhd_prot_t *prot = dhd->prot;
3309 prot->rx_dataoffset = rx_offset;
3310 }
3311
3312 static int
3313 dhd_check_create_info_rings(dhd_pub_t *dhd)
3314 {
3315 dhd_prot_t *prot = dhd->prot;
3316 int ret = BCME_ERROR;
3317 uint16 ringid;
3318
3319 {
3320 /* dongle may increase max_submission_rings so keep
3321 * ringid at end of dynamic rings
3322 */
3323 ringid = dhd->bus->max_tx_flowrings +
3324 (dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
3325 BCMPCIE_H2D_COMMON_MSGRINGS;
3326 }
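/*
 * Illustrative simplification of the expression above:
 *     ringid = max_submission_rings + BCMPCIE_H2D_COMMON_MSGRINGS
 * e.g. with hypothetical values max_submission_rings == 40 and
 * BCMPCIE_H2D_COMMON_MSGRINGS == 2, the info submit ring gets ringid 42,
 * placing it just past the dynamic flowring ids, as the comment above describes.
 */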
3327
3328 if (prot->h2dring_info_subn && prot->d2hring_info_cpln) {
3329 return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
3330 }
3331
3332 if (prot->h2dring_info_subn == NULL) {
3333 prot->h2dring_info_subn = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
3334
3335 if (prot->h2dring_info_subn == NULL) {
3336 DHD_ERROR(("%s: couldn't alloc memory for h2dring_info_subn\n",
3337 __FUNCTION__));
3338 return BCME_NOMEM;
3339 }
3340
3341 DHD_INFO(("%s: about to create debug submit ring\n", __FUNCTION__));
3342 ret = dhd_prot_ring_attach(dhd, prot->h2dring_info_subn, "h2dinfo",
3343 H2DRING_DYNAMIC_INFO_MAX_ITEM, H2DRING_INFO_BUFPOST_ITEMSIZE,
3344 ringid);
3345 if (ret != BCME_OK) {
3346 DHD_ERROR(("%s: couldn't alloc resources for dbg submit ring\n",
3347 __FUNCTION__));
3348 goto err;
3349 }
3350 }
3351
3352 if (prot->d2hring_info_cpln == NULL) {
3353 prot->d2hring_info_cpln = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
3354
3355 if (prot->d2hring_info_cpln == NULL) {
3356 DHD_ERROR(("%s: couldn't alloc memory for d2hring_info_cpln\n",
3357 __FUNCTION__));
3358 return BCME_NOMEM;
3359 }
3360
3361 /* create the debug info completion ring next to debug info submit ring
3362 * ringid = id next to debug info submit ring
3363 */
3364 ringid = ringid + 1;
3365
3366 DHD_INFO(("%s: about to create debug cpl ring\n", __FUNCTION__));
3367 ret = dhd_prot_ring_attach(dhd, prot->d2hring_info_cpln, "d2hinfo",
3368 D2HRING_DYNAMIC_INFO_MAX_ITEM, D2HRING_INFO_BUFCMPLT_ITEMSIZE,
3369 ringid);
3370 if (ret != BCME_OK) {
3371 DHD_ERROR(("%s: couldn't alloc resources for dbg cpl ring\n",
3372 __FUNCTION__));
3373 dhd_prot_ring_detach(dhd, prot->h2dring_info_subn);
3374 goto err;
3375 }
3376 }
3377
3378 return ret;
3379 err:
3380 MFREE(prot->osh, prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
3381 prot->h2dring_info_subn = NULL;
3382
3383 if (prot->d2hring_info_cpln) {
3384 MFREE(prot->osh, prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
3385 prot->d2hring_info_cpln = NULL;
3386 }
3387 return ret;
3388 } /* dhd_check_create_info_rings */
3389
3390 int
3391 dhd_prot_init_info_rings(dhd_pub_t *dhd)
3392 {
3393 dhd_prot_t *prot = dhd->prot;
3394 int ret = BCME_OK;
3395
3396 if ((ret = dhd_check_create_info_rings(dhd)) != BCME_OK) {
3397 DHD_ERROR(("%s: info rings could not be created\n",
3398 __FUNCTION__));
3399 return ret;
3400 }
3401
3402 if ((prot->d2hring_info_cpln->inited) || (prot->d2hring_info_cpln->create_pending)) {
3403 DHD_INFO(("Info completion ring already created or create pending\n"));
3404 return ret;
3405 }
3406
3407 DHD_TRACE(("trying to send create d2h info ring: id %d\n", prot->d2hring_info_cpln->idx));
3408 ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_info_cpln,
3409 BCMPCIE_D2H_RING_TYPE_DBGBUF_CPL, DHD_D2H_DBGRING_REQ_PKTID);
3410 if (ret != BCME_OK)
3411 return ret;
3412
3413 prot->d2hring_info_cpln->seqnum = D2H_EPOCH_INIT_VAL;
3414
3415 DHD_TRACE(("trying to send create h2d info ring id %d\n", prot->h2dring_info_subn->idx));
3416 prot->h2dring_info_subn->n_completion_ids = 1;
3417 prot->h2dring_info_subn->compeltion_ring_ids[0] = prot->d2hring_info_cpln->idx;
3418
3419 ret = dhd_send_h2d_ringcreate(dhd, prot->h2dring_info_subn,
3420 BCMPCIE_H2D_RING_TYPE_DBGBUF_SUBMIT, DHD_H2D_DBGRING_REQ_PKTID);
3421
3422 /* Note that there is no way to delete a d2h or h2d ring once its create request is sent,
3423  * so we cannot clean up if one ring was created while the other failed
3424  */
3425 return ret;
3426 } /* dhd_prot_init_info_rings */
3427
3428 static void
3429 dhd_prot_detach_info_rings(dhd_pub_t *dhd)
3430 {
3431 if (dhd->prot->h2dring_info_subn) {
3432 dhd_prot_ring_detach(dhd, dhd->prot->h2dring_info_subn);
3433 MFREE(dhd->prot->osh, dhd->prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
3434 dhd->prot->h2dring_info_subn = NULL;
3435 }
3436 if (dhd->prot->d2hring_info_cpln) {
3437 dhd_prot_ring_detach(dhd, dhd->prot->d2hring_info_cpln);
3438 MFREE(dhd->prot->osh, dhd->prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
3439 dhd->prot->d2hring_info_cpln = NULL;
3440 }
3441 }
3442
3443 /**
3444 * Initialize protocol: sync w/dongle state.
3445 * Sets dongle media info (iswl, drv_version, mac address).
3446 */
3447 int dhd_sync_with_dongle(dhd_pub_t *dhd)
3448 {
3449 int ret = 0;
3450 wlc_rev_info_t revinfo;
3451 char buf[128];
3452 dhd_prot_t *prot = dhd->prot;
3453
3454 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3455
3456 dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
3457
3458 /* Post ts buffer after shim layer is attached */
3459 ret = dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
3460
3461 #ifdef DHD_FW_COREDUMP
3462 /* Check the memdump capability */
3463 dhd_get_memdump_info(dhd);
3464 #endif /* DHD_FW_COREDUMP */
3465 #ifdef BCMASSERT_LOG
3466 dhd_get_assert_info(dhd);
3467 #endif /* BCMASSERT_LOG */
3468
3469 /* Get the device rev info */
3470 memset(&revinfo, 0, sizeof(revinfo));
3471 ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
3472 if (ret < 0) {
3473 DHD_ERROR(("%s: GET revinfo FAILED\n", __FUNCTION__));
3474 goto done;
3475 }
3476 DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
3477 revinfo.deviceid, revinfo.vendorid, revinfo.chipnum));
3478
3479 /* Get the RxBuf post size */
3480 memset(buf, 0, sizeof(buf));
3481 bcm_mkiovar("rxbufpost_sz", NULL, 0, buf, sizeof(buf));
3482 ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
3483 if (ret < 0) {
3484 DHD_ERROR(("%s: GET RxBuf post FAILED, default to %d\n",
3485 __FUNCTION__, DHD_FLOWRING_RX_BUFPOST_PKTSZ));
3486 prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
3487 } else {
3488 memcpy(&(prot->rxbufpost_sz), buf, sizeof(uint16));
3489 if ((prot->rxbufpost_sz < DHD_FLOWRING_RX_BUFPOST_PKTSZ) ||
3490 (prot->rxbufpost_sz > DHD_FLOWRING_RX_BUFPOST_PKTSZ_MAX)) {
3491 DHD_ERROR(("%s: Invalid RxBuf post size : %d, default to %d\n",
3492 __FUNCTION__, prot->rxbufpost_sz, DHD_FLOWRING_RX_BUFPOST_PKTSZ));
3493 prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
3494 } else {
3495 DHD_ERROR(("%s: RxBuf Post : %d\n", __FUNCTION__, prot->rxbufpost_sz));
3496 }
3497 }
3498
3499 /* Post buffers for packet reception */
3500 dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
3501
3502 DHD_SSSR_DUMP_INIT(dhd);
3503
3504 dhd_process_cid_mac(dhd, TRUE);
3505 ret = dhd_preinit_ioctls(dhd);
3506 dhd_process_cid_mac(dhd, FALSE);
3507
3508 /* Always assumes wl for now */
3509 dhd->iswl = TRUE;
3510 done:
3511 return ret;
3512 } /* dhd_sync_with_dongle */
3513
3514 #define DHD_DBG_SHOW_METADATA 0
3515
3516 #if DHD_DBG_SHOW_METADATA
3517 static void BCMFASTPATH
3518 dhd_prot_print_metadata(dhd_pub_t *dhd, void *ptr, int len)
3519 {
3520 uint8 tlv_t;
3521 uint8 tlv_l;
3522 uint8 *tlv_v = (uint8 *)ptr;
3523
3524 if (len <= BCMPCIE_D2H_METADATA_HDRLEN)
3525 return;
3526
3527 len -= BCMPCIE_D2H_METADATA_HDRLEN;
3528 tlv_v += BCMPCIE_D2H_METADATA_HDRLEN;
3529
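	/* Metadata after the header is a simple TLV stream (layout inferred from
	 * the loop below):
	 *     | tag (1 byte) | len (1 byte) | value (len bytes) | next TLV ...
	 * Parsing stops on a zero/filler tag or when the remaining length is too
	 * short to hold the advertised value.
	 */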
3530 while (len > TLV_HDR_LEN) {
3531 tlv_t = tlv_v[TLV_TAG_OFF];
3532 tlv_l = tlv_v[TLV_LEN_OFF];
3533
3534 len -= TLV_HDR_LEN;
3535 tlv_v += TLV_HDR_LEN;
3536 if (len < tlv_l)
3537 break;
3538 if ((tlv_t == 0) || (tlv_t == WLFC_CTL_TYPE_FILLER))
3539 break;
3540
3541 switch (tlv_t) {
3542 case WLFC_CTL_TYPE_TXSTATUS: {
3543 uint32 txs;
3544 memcpy(&txs, tlv_v, sizeof(uint32));
3545 if (tlv_l < (sizeof(wl_txstatus_additional_info_t) + sizeof(uint32))) {
3546 printf("METADATA TX_STATUS: %08x\n", txs);
3547 } else {
3548 wl_txstatus_additional_info_t tx_add_info;
3549 memcpy(&tx_add_info, tlv_v + sizeof(uint32),
3550 sizeof(wl_txstatus_additional_info_t));
3551 printf("METADATA TX_STATUS: %08x WLFCTS[%04x | %08x - %08x - %08x]"
3552 " rate = %08x tries = %d - %d\n", txs,
3553 tx_add_info.seq, tx_add_info.entry_ts,
3554 tx_add_info.enq_ts, tx_add_info.last_ts,
3555 tx_add_info.rspec, tx_add_info.rts_cnt,
3556 tx_add_info.tx_cnt);
3557 }
3558 } break;
3559
3560 case WLFC_CTL_TYPE_RSSI: {
3561 if (tlv_l == 1)
3562 printf("METADATA RX_RSSI: rssi = %d\n", *tlv_v);
3563 else
3564 printf("METADATA RX_RSSI[%04x]: rssi = %d snr = %d\n",
3565 (*(tlv_v + 3) << 8) | *(tlv_v + 2),
3566 (int8)(*tlv_v), *(tlv_v + 1));
3567 } break;
3568
3569 case WLFC_CTL_TYPE_FIFO_CREDITBACK:
3570 bcm_print_bytes("METADATA FIFO_CREDITBACK", tlv_v, tlv_l);
3571 break;
3572
3573 case WLFC_CTL_TYPE_TX_ENTRY_STAMP:
3574 bcm_print_bytes("METADATA TX_ENTRY", tlv_v, tlv_l);
3575 break;
3576
3577 case WLFC_CTL_TYPE_RX_STAMP: {
3578 struct {
3579 uint32 rspec;
3580 uint32 bus_time;
3581 uint32 wlan_time;
3582 } rx_tmstamp;
3583 memcpy(&rx_tmstamp, tlv_v, sizeof(rx_tmstamp));
3584 printf("METADATA RX TIMESTAMP: WLFCTS[%08x - %08x] rate = %08x\n",
3585 rx_tmstamp.wlan_time, rx_tmstamp.bus_time, rx_tmstamp.rspec);
3586 } break;
3587
3588 case WLFC_CTL_TYPE_TRANS_ID:
3589 bcm_print_bytes("METADATA TRANS_ID", tlv_v, tlv_l);
3590 break;
3591
3592 case WLFC_CTL_TYPE_COMP_TXSTATUS:
3593 bcm_print_bytes("METADATA COMP_TXSTATUS", tlv_v, tlv_l);
3594 break;
3595
3596 default:
3597 bcm_print_bytes("METADATA UNKNOWN", tlv_v, tlv_l);
3598 break;
3599 }
3600
3601 len -= tlv_l;
3602 tlv_v += tlv_l;
3603 }
3604 }
3605 #endif /* DHD_DBG_SHOW_METADATA */
3606
3607 static INLINE void BCMFASTPATH
3608 dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send)
3609 {
3610 if (pkt) {
3611 if (pkttype == PKTTYPE_IOCTL_RX ||
3612 pkttype == PKTTYPE_EVENT_RX ||
3613 pkttype == PKTTYPE_INFO_RX ||
3614 pkttype == PKTTYPE_TSBUF_RX) {
3615 #ifdef DHD_USE_STATIC_CTRLBUF
3616 PKTFREE_STATIC(dhd->osh, pkt, send);
3617 #else
3618 PKTFREE(dhd->osh, pkt, send);
3619 #endif /* DHD_USE_STATIC_CTRLBUF */
3620 } else {
3621 PKTFREE(dhd->osh, pkt, send);
3622 }
3623 }
3624 }
3625
3626 /**
3627 * dhd_prot_packet_get should be called only for items having pktid_ctrl_map handle
3628 * and all the bottom most functions like dhd_pktid_map_free hold separate DHD_PKTID_LOCK
3629 * to ensure thread safety, so no need to hold any locks for this function
3630 */
3631 static INLINE void * BCMFASTPATH
3632 dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype, bool free_pktid)
3633 {
3634 void *PKTBUF;
3635 dmaaddr_t pa;
3636 uint32 len;
3637 void *dmah;
3638 void *secdma;
3639
3640 #ifdef DHD_PCIE_PKTID
3641 if (free_pktid) {
3642 PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map,
3643 pktid, pa, len, dmah, secdma, pkttype);
3644 } else {
3645 PKTBUF = DHD_PKTID_TO_NATIVE_RSV(dhd, dhd->prot->pktid_ctrl_map,
3646 pktid, pa, len, dmah, secdma, pkttype);
3647 }
3648 #else
3649 PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map, pktid, pa,
3650 len, dmah, secdma, pkttype);
3651 #endif /* DHD_PCIE_PKTID */
3652 if (PKTBUF) {
3653 {
3654 if (SECURE_DMA_ENAB(dhd->osh))
3655 SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah,
3656 secdma, 0);
3657 else
3658 DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
3659 #ifdef DMAMAP_STATS
3660 switch (pkttype) {
3661 #ifndef IOCTLRESP_USE_CONSTMEM
3662 case PKTTYPE_IOCTL_RX:
3663 dhd->dma_stats.ioctl_rx--;
3664 dhd->dma_stats.ioctl_rx_sz -= len;
3665 break;
3666 #endif /* IOCTLRESP_USE_CONSTMEM */
3667 case PKTTYPE_EVENT_RX:
3668 dhd->dma_stats.event_rx--;
3669 dhd->dma_stats.event_rx_sz -= len;
3670 break;
3671 case PKTTYPE_INFO_RX:
3672 dhd->dma_stats.info_rx--;
3673 dhd->dma_stats.info_rx_sz -= len;
3674 break;
3675 case PKTTYPE_TSBUF_RX:
3676 dhd->dma_stats.tsbuf_rx--;
3677 dhd->dma_stats.tsbuf_rx_sz -= len;
3678 break;
3679 }
3680 #endif /* DMAMAP_STATS */
3681 }
3682 }
3683
3684 return PKTBUF;
3685 }
3686
3687 #ifdef IOCTLRESP_USE_CONSTMEM
3688 static INLINE void BCMFASTPATH
3689 dhd_prot_ioctl_ret_buffer_get(dhd_pub_t *dhd, uint32 pktid, dhd_dma_buf_t *retbuf)
3690 {
3691 memset(retbuf, 0, sizeof(dhd_dma_buf_t));
3692 retbuf->va = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle_ioctl, pktid,
3693 retbuf->pa, retbuf->len, retbuf->dmah, retbuf->secdma, PKTTYPE_IOCTL_RX);
3694
3695 return;
3696 }
3697 #endif // endif
3698
3699 static void BCMFASTPATH
3700 dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid)
3701 {
3702 dhd_prot_t *prot = dhd->prot;
3703 int16 fillbufs;
3704 uint16 cnt = 256;
3705 int retcount = 0;
3706
3707 fillbufs = prot->max_rxbufpost - prot->rxbufpost;
3708 while (fillbufs >= RX_BUF_BURST) {
3709 cnt--;
3710 if (cnt == 0) {
3711 /* find a better way to reschedule rx buf post if space is not available */
3712 DHD_ERROR(("h2d rx post ring not available to post host buffers \n"));
3713 DHD_ERROR(("Current posted host buf count %d \n", prot->rxbufpost));
3714 break;
3715 }
3716
3717 /* Post in a burst of 32 buffers at a time */
3718 fillbufs = MIN(fillbufs, RX_BUF_BURST);
3719
3720 /* Post buffers */
3721 retcount = dhd_prot_rxbuf_post(dhd, fillbufs, use_rsv_pktid);
3722
3723 if (retcount >= 0) {
3724 prot->rxbufpost += (uint16)retcount;
3725 #ifdef DHD_LB_RXC
3726 /* dhd_prot_rxbuf_post returns the number of buffers posted */
3727 DHD_LB_STATS_UPDATE_RXC_HISTO(dhd, retcount);
3728 #endif /* DHD_LB_RXC */
3729 /* how many more to post */
3730 fillbufs = prot->max_rxbufpost - prot->rxbufpost;
3731 } else {
3732 /* Make sure we don't run loop any further */
3733 fillbufs = 0;
3734 }
3735 }
3736 }
3737
3738 /** Post 'count' rx buffers to the dongle */
3739 static int BCMFASTPATH
3740 dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid)
3741 {
3742 void *p, **pktbuf;
3743 uint8 *rxbuf_post_tmp;
3744 host_rxbuf_post_t *rxbuf_post;
3745 void *msg_start;
3746 dmaaddr_t pa, *pktbuf_pa;
3747 uint32 *pktlen;
3748 uint16 i = 0, alloced = 0;
3749 unsigned long flags;
3750 uint32 pktid;
3751 dhd_prot_t *prot = dhd->prot;
3752 msgbuf_ring_t *ring = &prot->h2dring_rxp_subn;
3753 void *lcl_buf;
3754 uint16 lcl_buf_size;
3755 uint16 pktsz = prot->rxbufpost_sz;
3756
3757 /* allocate a local buffer to store pkt buffer va, pa and length */
3758 lcl_buf_size = (sizeof(void *) + sizeof(dmaaddr_t) + sizeof(uint32)) *
3759 RX_BUF_BURST;
3760 lcl_buf = MALLOC(dhd->osh, lcl_buf_size);
3761 if (!lcl_buf) {
3762 DHD_ERROR(("%s: local scratch buffer allocation failed\n", __FUNCTION__));
3763 return 0;
3764 }
3765 pktbuf = lcl_buf;
3766 pktbuf_pa = (dmaaddr_t *)((uint8 *)pktbuf + sizeof(void *) * RX_BUF_BURST);
3767 pktlen = (uint32 *)((uint8 *)pktbuf_pa + sizeof(dmaaddr_t) * RX_BUF_BURST);
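	/* Layout of the scratch buffer carved up just above (descriptive only):
	 *     lcl_buf --> [ void     *pktbuf[RX_BUF_BURST]    ]
	 *                 [ dmaaddr_t pktbuf_pa[RX_BUF_BURST] ]
	 *                 [ uint32    pktlen[RX_BUF_BURST]    ]
	 * i.e. one MALLOC holds the packet pointers, their DMA addresses and
	 * their lengths for a single burst.
	 */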
3768
3769 for (i = 0; i < count; i++) {
3770 if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) {
3771 DHD_ERROR(("%s:%d: PKTGET for rxbuf failed\n", __FUNCTION__, __LINE__));
3772 dhd->rx_pktgetfail++;
3773 break;
3774 }
3775
3776 pktlen[i] = PKTLEN(dhd->osh, p);
3777 if (SECURE_DMA_ENAB(dhd->osh)) {
3778 pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i],
3779 DMA_RX, p, 0, ring->dma_buf.secdma, 0);
3780 }
3781 #ifndef BCM_SECURE_DMA
3782 else
3783 pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i], DMA_RX, p, 0);
3784 #endif /* #ifndef BCM_SECURE_DMA */
3785
3786 if (PHYSADDRISZERO(pa)) {
3787 PKTFREE(dhd->osh, p, FALSE);
3788 DHD_ERROR(("Invalid phyaddr 0\n"));
3789 ASSERT(0);
3790 break;
3791 }
3792 #ifdef DMAMAP_STATS
3793 dhd->dma_stats.rxdata++;
3794 dhd->dma_stats.rxdata_sz += pktlen[i];
3795 #endif /* DMAMAP_STATS */
3796
3797 PKTPULL(dhd->osh, p, prot->rx_metadata_offset);
3798 pktlen[i] = PKTLEN(dhd->osh, p);
3799 pktbuf[i] = p;
3800 pktbuf_pa[i] = pa;
3801 }
3802
3803 /* only post what we have */
3804 count = i;
3805
3806 /* grab the ring lock to allocate pktid and post on ring */
3807 DHD_RING_LOCK(ring->ring_lock, flags);
3808
3809 /* Claim space for exactly 'count' no of messages, for mitigation purpose */
3810 msg_start = (void *)
3811 dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, TRUE);
3812 if (msg_start == NULL) {
3813 DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
3814 DHD_RING_UNLOCK(ring->ring_lock, flags);
3815 goto cleanup;
3816 }
3817 /* if msg_start != NULL, we should have alloced space for at least 1 item */
3818 ASSERT(alloced > 0);
3819
3820 rxbuf_post_tmp = (uint8*)msg_start;
3821
3822 for (i = 0; i < alloced; i++) {
3823 rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp;
3824 p = pktbuf[i];
3825 pa = pktbuf_pa[i];
3826
3827 #if defined(DHD_LB_RXC)
3828 if (use_rsv_pktid == TRUE) {
3829 bcm_workq_t *workq = &prot->rx_compl_cons;
3830 int elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
3831
3832 if (elem_ix == BCM_RING_EMPTY) {
3833 DHD_INFO(("%s rx_compl_cons ring is empty\n", __FUNCTION__));
3834 pktid = DHD_PKTID_INVALID;
3835 goto alloc_pkt_id;
3836 } else {
3837 uint32 *elem = WORKQ_ELEMENT(uint32, workq, elem_ix);
3838 pktid = *elem;
3839 }
3840
3841 rxbuf_post->cmn_hdr.request_id = htol32(pktid);
3842
3843 /* Now populate the previous locker with valid information */
3844 if (pktid != DHD_PKTID_INVALID) {
3845 DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_rx_map,
3846 p, pktid, pa, pktlen[i], DMA_RX, NULL, NULL,
3847 PKTTYPE_DATA_RX);
3848 }
3849 } else
3850 #endif /* DHD_LB_RXC */
3851 {
3852 #if defined(DHD_LB_RXC)
3853 alloc_pkt_id:
3854 #endif /* DHD_LB_RXC */
3855 pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_rx_map, p, pa,
3856 pktlen[i], DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_RX);
3857 #if defined(DHD_PCIE_PKTID)
3858 if (pktid == DHD_PKTID_INVALID) {
3859 break;
3860 }
3861 #endif /* DHD_PCIE_PKTID */
3862 }
3863
3864 /* Common msg header */
3865 rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST;
3866 rxbuf_post->cmn_hdr.if_id = 0;
3867 rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
3868 rxbuf_post->cmn_hdr.flags = ring->current_phase;
3869 ring->seqnum++;
3870 rxbuf_post->data_buf_len = htol16((uint16)pktlen[i]);
3871 rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
3872 rxbuf_post->data_buf_addr.low_addr =
3873 htol32(PHYSADDRLO(pa) + prot->rx_metadata_offset);
3874
3875 if (prot->rx_metadata_offset) {
3876 rxbuf_post->metadata_buf_len = prot->rx_metadata_offset;
3877 rxbuf_post->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
3878 rxbuf_post->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
3879 } else {
3880 rxbuf_post->metadata_buf_len = 0;
3881 rxbuf_post->metadata_buf_addr.high_addr = 0;
3882 rxbuf_post->metadata_buf_addr.low_addr = 0;
3883 }
3884
3885 #ifdef DHD_PKTID_AUDIT_RING
3886 DHD_PKTID_AUDIT(dhd, prot->pktid_rx_map, pktid, DHD_DUPLICATE_ALLOC);
3887 #endif /* DHD_PKTID_AUDIT_RING */
3888
3889 rxbuf_post->cmn_hdr.request_id = htol32(pktid);
3890
3891 /* Move rxbuf_post_tmp to next item */
3892 rxbuf_post_tmp = rxbuf_post_tmp + ring->item_len;
3893
3894 #ifdef DHD_LBUF_AUDIT
3895 PKTAUDIT(dhd->osh, p);
3896 #endif // endif
3897 }
3898
3899 if (i < alloced) {
3900 if (ring->wr < (alloced - i))
3901 ring->wr = ring->max_items - (alloced - i);
3902 else
3903 ring->wr -= (alloced - i);
3904
3905 if (ring->wr == 0) {
3906 DHD_INFO(("%s: flipping the phase now\n", ring->name));
3907 ring->current_phase = ring->current_phase ?
3908 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
3909 }
3910
3911 alloced = i;
3912 }
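	/* Illustrative rollback numbers (hypothetical ring size): with
	 * max_items == 512 and two claimed-but-unfilled slots (alloced - i == 2):
	 *     wr == 0  ->  wr becomes 512 - 2 == 510   (wrap-around branch)
	 *     wr == 5  ->  wr becomes 5 - 2   == 3     (normal branch)
	 * returning the unused slots to the ring before the doorbell is rung.
	 */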
3913
3914 /* update ring's WR index and ring doorbell to dongle */
3915 if (alloced > 0) {
3916 dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
3917 }
3918
3919 DHD_RING_UNLOCK(ring->ring_lock, flags);
3920
3921 cleanup:
3922 for (i = alloced; i < count; i++) {
3923 p = pktbuf[i];
3924 pa = pktbuf_pa[i];
3925
3926 if (SECURE_DMA_ENAB(dhd->osh))
3927 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0,
3928 DHD_DMAH_NULL, ring->dma_buf.secdma, 0);
3929 else
3930 DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0, DHD_DMAH_NULL);
3931 PKTFREE(dhd->osh, p, FALSE);
3932 }
3933
3934 MFREE(dhd->osh, lcl_buf, lcl_buf_size);
3935
3936 return alloced;
3937 } /* dhd_prot_rxbuf_post */
3938
3939 static int
3940 dhd_prot_infobufpost(dhd_pub_t *dhd, msgbuf_ring_t *ring)
3941 {
3942 unsigned long flags;
3943 uint32 pktid;
3944 dhd_prot_t *prot = dhd->prot;
3945 uint16 alloced = 0;
3946 uint16 pktsz = DHD_INFOBUF_RX_BUFPOST_PKTSZ;
3947 uint32 pktlen;
3948 info_buf_post_msg_t *infobuf_post;
3949 uint8 *infobuf_post_tmp;
3950 void *p;
3951 void* msg_start;
3952 uint8 i = 0;
3953 dmaaddr_t pa;
3954 int16 count = 0;
3955
3956 if (ring == NULL)
3957 return 0;
3958
3959 if (ring->inited != TRUE)
3960 return 0;
3961 if (ring == dhd->prot->h2dring_info_subn) {
3962 if (prot->max_infobufpost == 0)
3963 return 0;
3964
3965 count = prot->max_infobufpost - prot->infobufpost;
3966 }
3967 else {
3968 DHD_ERROR(("Unknown ring\n"));
3969 return 0;
3970 }
3971
3972 if (count <= 0) {
3973 DHD_INFO(("%s: Cannot post more than max info resp buffers\n",
3974 __FUNCTION__));
3975 return 0;
3976 }
3977
3978 /* grab the ring lock to allocate pktid and post on ring */
3979 DHD_RING_LOCK(ring->ring_lock, flags);
3980
3981 /* Claim space for exactly 'count' no of messages, for mitigation purpose */
3982 msg_start = (void *) dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, FALSE);
3983
3984 if (msg_start == NULL) {
3985 DHD_INFO(("%s:%d: infobufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
3986 DHD_RING_UNLOCK(ring->ring_lock, flags);
3987 return -1;
3988 }
3989
3990 /* if msg_start != NULL, we should have alloced space for at least 1 item */
3991 ASSERT(alloced > 0);
3992
3993 infobuf_post_tmp = (uint8*) msg_start;
3994
3995 /* loop through each allocated message in the host ring */
3996 for (i = 0; i < alloced; i++) {
3997 infobuf_post = (info_buf_post_msg_t *) infobuf_post_tmp;
3998 /* Create a rx buffer */
3999 #ifdef DHD_USE_STATIC_CTRLBUF
4000 p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
4001 #else
4002 p = PKTGET(dhd->osh, pktsz, FALSE);
4003 #endif /* DHD_USE_STATIC_CTRLBUF */
4004 if (p == NULL) {
4005 DHD_ERROR(("%s:%d: PKTGET for infobuf failed\n", __FUNCTION__, __LINE__));
4006 dhd->rx_pktgetfail++;
4007 break;
4008 }
4009 pktlen = PKTLEN(dhd->osh, p);
4010 if (SECURE_DMA_ENAB(dhd->osh)) {
4011 pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
4012 DMA_RX, p, 0, ring->dma_buf.secdma, 0);
4013 }
4014 #ifndef BCM_SECURE_DMA
4015 else
4016 pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
4017 #endif /* #ifndef BCM_SECURE_DMA */
4018 if (PHYSADDRISZERO(pa)) {
4019 if (SECURE_DMA_ENAB(dhd->osh)) {
4020 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
4021 ring->dma_buf.secdma, 0);
4022 }
4023 else
4024 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
4025 #ifdef DHD_USE_STATIC_CTRLBUF
4026 PKTFREE_STATIC(dhd->osh, p, FALSE);
4027 #else
4028 PKTFREE(dhd->osh, p, FALSE);
4029 #endif /* DHD_USE_STATIC_CTRLBUF */
4030 DHD_ERROR(("Invalid phyaddr 0\n"));
4031 ASSERT(0);
4032 break;
4033 }
4034 #ifdef DMAMAP_STATS
4035 dhd->dma_stats.info_rx++;
4036 dhd->dma_stats.info_rx_sz += pktlen;
4037 #endif /* DMAMAP_STATS */
4038 pktlen = PKTLEN(dhd->osh, p);
4039
4040 /* Common msg header */
4041 infobuf_post->cmn_hdr.msg_type = MSG_TYPE_INFO_BUF_POST;
4042 infobuf_post->cmn_hdr.if_id = 0;
4043 infobuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
4044 infobuf_post->cmn_hdr.flags = ring->current_phase;
4045 ring->seqnum++;
4046
4047 pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_ctrl_map, p, pa,
4048 pktlen, DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_INFO_RX);
4049
4050 #if defined(DHD_PCIE_PKTID)
4051 if (pktid == DHD_PKTID_INVALID) {
4052 if (SECURE_DMA_ENAB(dhd->osh)) {
4053 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0,
4054 ring->dma_buf.secdma, 0);
4055 } else
4056 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0);
4057
4058 #ifdef DHD_USE_STATIC_CTRLBUF
4059 PKTFREE_STATIC(dhd->osh, p, FALSE);
4060 #else
4061 PKTFREE(dhd->osh, p, FALSE);
4062 #endif /* DHD_USE_STATIC_CTRLBUF */
4063 DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
4064 break;
4065 }
4066 #endif /* DHD_PCIE_PKTID */
4067
4068 infobuf_post->host_buf_len = htol16((uint16)pktlen);
4069 infobuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
4070 infobuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
4071
4072 #ifdef DHD_PKTID_AUDIT_RING
4073 DHD_PKTID_AUDIT(dhd, prot->pktid_ctrl_map, pktid, DHD_DUPLICATE_ALLOC);
4074 #endif /* DHD_PKTID_AUDIT_RING */
4075
4076 DHD_INFO(("ID %d, low_addr 0x%08x, high_addr 0x%08x\n",
4077 infobuf_post->cmn_hdr.request_id, infobuf_post->host_buf_addr.low_addr,
4078 infobuf_post->host_buf_addr.high_addr));
4079
4080 infobuf_post->cmn_hdr.request_id = htol32(pktid);
4081 /* Move rxbuf_post_tmp to next item */
4082 infobuf_post_tmp = infobuf_post_tmp + ring->item_len;
4083 #ifdef DHD_LBUF_AUDIT
4084 PKTAUDIT(dhd->osh, p);
4085 #endif // endif
4086 }
4087
4088 if (i < alloced) {
4089 if (ring->wr < (alloced - i))
4090 ring->wr = ring->max_items - (alloced - i);
4091 else
4092 ring->wr -= (alloced - i);
4093
4094 alloced = i;
4095 if (alloced && ring->wr == 0) {
4096 DHD_INFO(("%s: flipping the phase now\n", ring->name));
4097 ring->current_phase = ring->current_phase ?
4098 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4099 }
4100 }
4101
4102 /* Update the write pointer in TCM & ring bell */
4103 if (alloced > 0) {
4104 if (ring == dhd->prot->h2dring_info_subn) {
4105 prot->infobufpost += alloced;
4106 }
4107 dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
4108 }
4109
4110 DHD_RING_UNLOCK(ring->ring_lock, flags);
4111
4112 return alloced;
4113 } /* dhd_prot_infobufpost */
4114
4115 #ifdef IOCTLRESP_USE_CONSTMEM
4116 static int
4117 alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
4118 {
4119 int err;
4120 memset(retbuf, 0, sizeof(dhd_dma_buf_t));
4121
4122 if ((err = dhd_dma_buf_alloc(dhd, retbuf, IOCT_RETBUF_SIZE)) != BCME_OK) {
4123 DHD_ERROR(("%s: dhd_dma_buf_alloc err %d\n", __FUNCTION__, err));
4124 ASSERT(0);
4125 return BCME_NOMEM;
4126 }
4127
4128 return BCME_OK;
4129 }
4130
4131 static void
4132 free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
4133 {
4134 /* retbuf (declared on stack) is not fully populated; restore len and _alloced before freeing */
4135 if (retbuf->va) {
4136 uint32 dma_pad;
4137 dma_pad = (IOCT_RETBUF_SIZE % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
4138 retbuf->len = IOCT_RETBUF_SIZE;
4139 retbuf->_alloced = retbuf->len + dma_pad;
4140 }
4141
4142 dhd_dma_buf_free(dhd, retbuf);
4143 return;
4144 }
4145 #endif /* IOCTLRESP_USE_CONSTMEM */
4146
4147 static int
4148 dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, uint8 msg_type)
4149 {
4150 void *p;
4151 uint16 pktsz;
4152 ioctl_resp_evt_buf_post_msg_t *rxbuf_post;
4153 dmaaddr_t pa;
4154 uint32 pktlen;
4155 dhd_prot_t *prot = dhd->prot;
4156 uint16 alloced = 0;
4157 unsigned long flags;
4158 dhd_dma_buf_t retbuf;
4159 void *dmah = NULL;
4160 uint32 pktid;
4161 void *map_handle;
4162 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
4163 bool non_ioctl_resp_buf = 0;
4164 dhd_pkttype_t buf_type;
4165
4166 if (dhd->busstate == DHD_BUS_DOWN) {
4167 DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
4168 return -1;
4169 }
4170 memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
4171
4172 if (msg_type == MSG_TYPE_IOCTLRESP_BUF_POST)
4173 buf_type = PKTTYPE_IOCTL_RX;
4174 else if (msg_type == MSG_TYPE_EVENT_BUF_POST)
4175 buf_type = PKTTYPE_EVENT_RX;
4176 else if (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST)
4177 buf_type = PKTTYPE_TSBUF_RX;
4178 else {
4179 DHD_ERROR(("invalid message type to be posted to Ctrl ring %d\n", msg_type));
4180 return -1;
4181 }
4182
4183 if ((msg_type == MSG_TYPE_EVENT_BUF_POST) || (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST))
4184 non_ioctl_resp_buf = TRUE;
4185 else
4186 non_ioctl_resp_buf = FALSE;
4187
4188 if (non_ioctl_resp_buf) {
4189 /* Allocate packet for non-ioctl-resp buffer post */
4190 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
4191 } else {
4192 /* Allocate packet for ctrl/ioctl buffer post */
4193 pktsz = DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ;
4194 }
4195
4196 #ifdef IOCTLRESP_USE_CONSTMEM
4197 if (!non_ioctl_resp_buf) {
4198 if (alloc_ioctl_return_buffer(dhd, &retbuf) != BCME_OK) {
4199 DHD_ERROR(("Could not allocate IOCTL response buffer\n"));
4200 return -1;
4201 }
4202 ASSERT(retbuf.len == IOCT_RETBUF_SIZE);
4203 p = retbuf.va;
4204 pktlen = retbuf.len;
4205 pa = retbuf.pa;
4206 dmah = retbuf.dmah;
4207 } else
4208 #endif /* IOCTLRESP_USE_CONSTMEM */
4209 {
4210 #ifdef DHD_USE_STATIC_CTRLBUF
4211 p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
4212 #else
4213 p = PKTGET(dhd->osh, pktsz, FALSE);
4214 #endif /* DHD_USE_STATIC_CTRLBUF */
4215 if (p == NULL) {
4216 DHD_ERROR(("%s:%d: PKTGET for %s buf failed\n",
4217 __FUNCTION__, __LINE__, non_ioctl_resp_buf ?
4218 "EVENT" : "IOCTL RESP"));
4219 dhd->rx_pktgetfail++;
4220 return -1;
4221 }
4222
4223 pktlen = PKTLEN(dhd->osh, p);
4224
4225 if (SECURE_DMA_ENAB(dhd->osh)) {
4226 pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
4227 DMA_RX, p, 0, ring->dma_buf.secdma, 0);
4228 }
4229 #ifndef BCM_SECURE_DMA
4230 else
4231 pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
4232 #endif /* #ifndef BCM_SECURE_DMA */
4233
4234 if (PHYSADDRISZERO(pa)) {
4235 DHD_ERROR(("Invalid physaddr 0\n"));
4236 ASSERT(0);
4237 goto free_pkt_return;
4238 }
4239
4240 #ifdef DMAMAP_STATS
4241 switch (buf_type) {
4242 #ifndef IOCTLRESP_USE_CONSTMEM
4243 case PKTTYPE_IOCTL_RX:
4244 dhd->dma_stats.ioctl_rx++;
4245 dhd->dma_stats.ioctl_rx_sz += pktlen;
4246 break;
4247 #endif /* !IOCTLRESP_USE_CONSTMEM */
4248 case PKTTYPE_EVENT_RX:
4249 dhd->dma_stats.event_rx++;
4250 dhd->dma_stats.event_rx_sz += pktlen;
4251 break;
4252 case PKTTYPE_TSBUF_RX:
4253 dhd->dma_stats.tsbuf_rx++;
4254 dhd->dma_stats.tsbuf_rx_sz += pktlen;
4255 break;
4256 default:
4257 break;
4258 }
4259 #endif /* DMAMAP_STATS */
4260
4261 }
4262
4263 /* grab the ring lock to allocate pktid and post on ring */
4264 DHD_RING_LOCK(ring->ring_lock, flags);
4265
4266 rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *)
4267 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
4268
4269 if (rxbuf_post == NULL) {
4270 DHD_RING_UNLOCK(ring->ring_lock, flags);
4271 DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer \n",
4272 __FUNCTION__, __LINE__));
4273
4274 #ifdef IOCTLRESP_USE_CONSTMEM
4275 if (non_ioctl_resp_buf)
4276 #endif /* IOCTLRESP_USE_CONSTMEM */
4277 {
4278 if (SECURE_DMA_ENAB(dhd->osh)) {
4279 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
4280 ring->dma_buf.secdma, 0);
4281 } else {
4282 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
4283 }
4284 }
4285 goto free_pkt_return;
4286 }
4287
4288 /* CMN msg header */
4289 rxbuf_post->cmn_hdr.msg_type = msg_type;
4290
4291 #ifdef IOCTLRESP_USE_CONSTMEM
4292 if (!non_ioctl_resp_buf) {
4293 map_handle = dhd->prot->pktid_map_handle_ioctl;
4294 pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, p, pa, pktlen, DMA_RX, dmah,
4295 ring->dma_buf.secdma, buf_type);
4296 } else
4297 #endif /* IOCTLRESP_USE_CONSTMEM */
4298 {
4299 map_handle = dhd->prot->pktid_ctrl_map;
4300 pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle,
4301 p, pa, pktlen, DMA_RX, dmah, ring->dma_buf.secdma,
4302 buf_type);
4303 }
4304
4305 if (pktid == DHD_PKTID_INVALID) {
4306 if (ring->wr == 0) {
4307 ring->wr = ring->max_items - 1;
4308 } else {
4309 ring->wr--;
4310 if (ring->wr == 0) {
4311 ring->current_phase = ring->current_phase ? 0 :
4312 BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4313 }
4314 }
4315 DHD_RING_UNLOCK(ring->ring_lock, flags);
4316 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
4317 DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
4318 goto free_pkt_return;
4319 }
4320
4321 #ifdef DHD_PKTID_AUDIT_RING
4322 DHD_PKTID_AUDIT(dhd, map_handle, pktid, DHD_DUPLICATE_ALLOC);
4323 #endif /* DHD_PKTID_AUDIT_RING */
4324
4325 rxbuf_post->cmn_hdr.request_id = htol32(pktid);
4326 rxbuf_post->cmn_hdr.if_id = 0;
4327 rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
4328 ring->seqnum++;
4329 rxbuf_post->cmn_hdr.flags = ring->current_phase;
4330
4331 #if defined(DHD_PCIE_PKTID)
4332 if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) {
4333 if (ring->wr == 0) {
4334 ring->wr = ring->max_items - 1;
4335 } else {
4336 if (ring->wr == 0) {
4337 ring->current_phase = ring->current_phase ? 0 :
4338 BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4339 }
4340 }
4341 DHD_RING_UNLOCK(ring->ring_lock, flags);
4342 #ifdef IOCTLRESP_USE_CONSTMEM
4343 if (non_ioctl_resp_buf)
4344 #endif /* IOCTLRESP_USE_CONSTMEM */
4345 {
4346 if (SECURE_DMA_ENAB(dhd->osh)) {
4347 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
4348 ring->dma_buf.secdma, 0);
4349 } else
4350 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
4351 }
4352 goto free_pkt_return;
4353 }
4354 #endif /* DHD_PCIE_PKTID */
4355
4356 #ifndef IOCTLRESP_USE_CONSTMEM
4357 rxbuf_post->host_buf_len = htol16((uint16)PKTLEN(dhd->osh, p));
4358 #else
4359 rxbuf_post->host_buf_len = htol16((uint16)pktlen);
4360 #endif /* IOCTLRESP_USE_CONSTMEM */
4361 rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
4362 rxbuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
4363
4364 #ifdef DHD_LBUF_AUDIT
4365 if (non_ioctl_resp_buf)
4366 PKTAUDIT(dhd->osh, p);
4367 #endif // endif
4368
4369 /* update ring's WR index and ring doorbell to dongle */
4370 dhd_prot_ring_write_complete(dhd, ring, rxbuf_post, 1);
4371
4372 DHD_RING_UNLOCK(ring->ring_lock, flags);
4373
4374 return 1;
4375
4376 free_pkt_return:
4377 if (!non_ioctl_resp_buf) {
4378 #ifdef IOCTLRESP_USE_CONSTMEM
4379 free_ioctl_return_buffer(dhd, &retbuf);
4380 #else
4381 dhd_prot_packet_free(dhd, p, buf_type, FALSE);
4382 #endif /* IOCTLRESP_USE_CONSTMEM */
4383 } else {
4384 dhd_prot_packet_free(dhd, p, buf_type, FALSE);
4385 }
4386
4387 return -1;
4388 } /* dhd_prot_rxbufpost_ctrl */
4389
4390 static uint16
4391 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msg_type, uint32 max_to_post)
4392 {
4393 uint32 i = 0;
4394 int32 ret_val;
4395
4396 DHD_INFO(("max to post %d, msg_type %d\n", max_to_post, msg_type));
4397
4398 if (dhd->busstate == DHD_BUS_DOWN) {
4399 DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
4400 return 0;
4401 }
4402
4403 while (i < max_to_post) {
4404 ret_val = dhd_prot_rxbufpost_ctrl(dhd, msg_type);
4405 if (ret_val < 0)
4406 break;
4407 i++;
4408 }
4409 DHD_INFO(("posted %d buffers of type %d\n", i, msg_type));
4410 return (uint16)i;
4411 }
4412
4413 static void
4414 dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *dhd)
4415 {
4416 dhd_prot_t *prot = dhd->prot;
4417 int max_to_post;
4418
4419 DHD_INFO(("ioctl resp buf post\n"));
4420 max_to_post = prot->max_ioctlrespbufpost - prot->cur_ioctlresp_bufs_posted;
4421 if (max_to_post <= 0) {
4422 DHD_INFO(("%s: Cannot post more than max IOCTL resp buffers\n",
4423 __FUNCTION__));
4424 return;
4425 }
4426 prot->cur_ioctlresp_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
4427 MSG_TYPE_IOCTLRESP_BUF_POST, max_to_post);
4428 }
4429
4430 static void
4431 dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *dhd)
4432 {
4433 dhd_prot_t *prot = dhd->prot;
4434 int max_to_post;
4435
4436 max_to_post = prot->max_eventbufpost - prot->cur_event_bufs_posted;
4437 if (max_to_post <= 0) {
4438 DHD_ERROR(("%s: Cannot post more than max event buffers\n",
4439 __FUNCTION__));
4440 return;
4441 }
4442 prot->cur_event_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
4443 MSG_TYPE_EVENT_BUF_POST, max_to_post);
4444 }
4445
4446 static int
4447 dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *dhd)
4448 {
4449 return 0;
4450 }
4451
4452 bool BCMFASTPATH
4453 dhd_prot_process_msgbuf_infocpl(dhd_pub_t *dhd, uint bound)
4454 {
4455 dhd_prot_t *prot = dhd->prot;
4456 bool more = TRUE;
4457 uint n = 0;
4458 msgbuf_ring_t *ring = prot->d2hring_info_cpln;
4459 unsigned long flags;
4460
4461 if (ring == NULL)
4462 return FALSE;
4463 if (ring->inited != TRUE)
4464 return FALSE;
4465
4466 /* Process all the messages - DTOH direction */
4467 while (!dhd_is_device_removed(dhd)) {
4468 uint8 *msg_addr;
4469 uint32 msg_len;
4470
4471 if (dhd->hang_was_sent) {
4472 more = FALSE;
4473 break;
4474 }
4475
4476 #ifdef DHD_MAP_LOGGING
4477 if (dhd->smmu_fault_occurred) {
4478 more = FALSE;
4479 break;
4480 }
4481 #endif /* DHD_MAP_LOGGING */
4482
4483 DHD_RING_LOCK(ring->ring_lock, flags);
4484 /* Get the message from ring */
4485 msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
4486 DHD_RING_UNLOCK(ring->ring_lock, flags);
4487 if (msg_addr == NULL) {
4488 more = FALSE;
4489 break;
4490 }
4491
4492 /* Prefetch data to populate the cache */
4493 OSL_PREFETCH(msg_addr);
4494
4495 if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
4496 DHD_ERROR(("%s: Error at process infocpl msgbuf of len %d\n",
4497 __FUNCTION__, msg_len));
4498 }
4499
4500 /* Update read pointer */
4501 dhd_prot_upd_read_idx(dhd, ring);
4502
4503 /* After batch processing, check RX bound */
4504 n += msg_len / ring->item_len;
4505 if (n >= bound) {
4506 break;
4507 }
4508 }
4509
4510 return more;
4511 }
4512
4513 /** called when DHD needs to check for 'receive complete' messages from the dongle */
4514 bool BCMFASTPATH
4515 dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound)
4516 {
4517 bool more = FALSE;
4518 uint n = 0;
4519 dhd_prot_t *prot = dhd->prot;
4520 msgbuf_ring_t *ring = &prot->d2hring_rx_cpln;
4521 uint16 item_len = ring->item_len;
4522 host_rxbuf_cmpl_t *msg = NULL;
4523 uint8 *msg_addr;
4524 uint32 msg_len;
4525 uint16 pkt_cnt, pkt_cnt_newidx;
4526 unsigned long flags;
4527 dmaaddr_t pa;
4528 uint32 len;
4529 void *dmah;
4530 void *secdma;
4531 int ifidx = 0, if_newidx = 0;
4532 void *pkt, *pktqhead = NULL, *prevpkt = NULL, *pkt_newidx, *nextpkt;
4533 uint32 pktid;
4534 int i;
4535 uint8 sync;
4536
4537 while (1) {
4538 if (dhd_is_device_removed(dhd))
4539 break;
4540
4541 if (dhd->hang_was_sent)
4542 break;
4543
4544 #ifdef DHD_MAP_LOGGING
4545 if (dhd->smmu_fault_occurred) {
4546 break;
4547 }
4548 #endif /* DHD_MAP_LOGGING */
4549
4550 pkt_cnt = 0;
4551 pktqhead = pkt_newidx = NULL;
4552 pkt_cnt_newidx = 0;
4553
4554 DHD_RING_LOCK(ring->ring_lock, flags);
4555
4556 /* Get the address of the next message to be read from ring */
4557 msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
4558 if (msg_addr == NULL) {
4559 DHD_RING_UNLOCK(ring->ring_lock, flags);
4560 break;
4561 }
4562
4563 while (msg_len > 0) {
4564 msg = (host_rxbuf_cmpl_t *)msg_addr;
4565
4566 /* Wait until DMA completes, then fetch msg_type */
4567 sync = prot->d2h_sync_cb(dhd, ring, &msg->cmn_hdr, item_len);
4568 /*
4569  * Update curr_rd to the current index in the ring from which
4570  * the work item is fetched. This way, if the fetched work item
4571  * fails the LIVELOCK check, we can print the exact read index in
4572  * the ring at which the corrupted work item showed up.
4573  */
4574 if ((ring->curr_rd + 1) >= ring->max_items) {
4575 ring->curr_rd = 0;
4576 } else {
4577 ring->curr_rd += 1;
4578 }
4579
4580 if (!sync) {
4581 msg_len -= item_len;
4582 msg_addr += item_len;
4583 continue;
4584 }
4585
4586 pktid = ltoh32(msg->cmn_hdr.request_id);
4587
4588 #ifdef DHD_PKTID_AUDIT_RING
4589 DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_rx_map, pktid,
4590 DHD_DUPLICATE_FREE, msg, D2HRING_RXCMPLT_ITEMSIZE);
4591 #endif /* DHD_PKTID_AUDIT_RING */
4592
4593 pkt = DHD_PKTID_TO_NATIVE(dhd, prot->pktid_rx_map, pktid, pa,
4594 len, dmah, secdma, PKTTYPE_DATA_RX);
4595 if (!pkt) {
4596 msg_len -= item_len;
4597 msg_addr += item_len;
4598 continue;
4599 }
4600
4601 if (SECURE_DMA_ENAB(dhd->osh))
4602 SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0,
4603 dmah, secdma, 0);
4604 else
4605 DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
4606
4607 #ifdef DMAMAP_STATS
4608 dhd->dma_stats.rxdata--;
4609 dhd->dma_stats.rxdata_sz -= len;
4610 #endif /* DMAMAP_STATS */
4611 DHD_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, "
4612 "pktdata %p, metalen %d\n",
4613 ltoh32(msg->cmn_hdr.request_id),
4614 ltoh16(msg->data_offset),
4615 ltoh16(msg->data_len), msg->cmn_hdr.if_id,
4616 msg->cmn_hdr.flags, PKTDATA(dhd->osh, pkt),
4617 ltoh16(msg->metadata_len)));
4618
4619 pkt_cnt++;
4620 msg_len -= item_len;
4621 msg_addr += item_len;
4622
4623 #if DHD_DBG_SHOW_METADATA
4624 if (prot->metadata_dbg && prot->rx_metadata_offset &&
4625 msg->metadata_len) {
4626 uchar *ptr;
4627 ptr = PKTDATA(dhd->osh, pkt) - (prot->rx_metadata_offset);
4628 /* header followed by data */
4629 bcm_print_bytes("rxmetadata", ptr, msg->metadata_len);
4630 dhd_prot_print_metadata(dhd, ptr, msg->metadata_len);
4631 }
4632 #endif /* DHD_DBG_SHOW_METADATA */
4633
4634 /* data_offset from buf start */
4635 if (ltoh16(msg->data_offset)) {
4636 /* data offset given from dongle after split rx */
4637 PKTPULL(dhd->osh, pkt, ltoh16(msg->data_offset));
4638 }
4639 else if (prot->rx_dataoffset) {
4640 /* DMA RX offset updated through shared area */
4641 PKTPULL(dhd->osh, pkt, prot->rx_dataoffset);
4642 }
4643 /* Actual length of the packet */
4644 PKTSETLEN(dhd->osh, pkt, ltoh16(msg->data_len));
4645 #if defined(WL_MONITOR)
4646 if (dhd_monitor_enabled(dhd, ifidx) &&
4647 (msg->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11)) {
4648 dhd_rx_mon_pkt(dhd, msg, pkt, ifidx);
4649 continue;
4650 }
4651 #endif // endif
4652
4653 if (!pktqhead) {
4654 pktqhead = prevpkt = pkt;
4655 ifidx = msg->cmn_hdr.if_id;
4656 } else {
4657 if (ifidx != msg->cmn_hdr.if_id) {
4658 pkt_newidx = pkt;
4659 if_newidx = msg->cmn_hdr.if_id;
4660 pkt_cnt--;
4661 pkt_cnt_newidx = 1;
4662 break;
4663 } else {
4664 PKTSETNEXT(dhd->osh, prevpkt, pkt);
4665 prevpkt = pkt;
4666 }
4667 }
4668
4669 #ifdef DHD_LBUF_AUDIT
4670 PKTAUDIT(dhd->osh, pkt);
4671 #endif // endif
4672 }
4673
4674 /* roll back read pointer for unprocessed message */
4675 if (msg_len > 0) {
4676 if (ring->rd < msg_len / item_len)
4677 ring->rd = ring->max_items - msg_len / item_len;
4678 else
4679 ring->rd -= msg_len / item_len;
4680 }
4681
4682 /* Update read pointer */
4683 dhd_prot_upd_read_idx(dhd, ring);
4684
4685 DHD_RING_UNLOCK(ring->ring_lock, flags);
4686
4687 pkt = pktqhead;
4688 for (i = 0; pkt && i < pkt_cnt; i++, pkt = nextpkt) {
4689 nextpkt = PKTNEXT(dhd->osh, pkt);
4690 PKTSETNEXT(dhd->osh, pkt, NULL);
4691 #ifdef DHD_LB_RXP
4692 dhd_lb_rx_pkt_enqueue(dhd, pkt, ifidx);
4693 #elif defined(DHD_RX_CHAINING)
4694 dhd_rxchain_frame(dhd, pkt, ifidx);
4695 #else
4696 dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
4697 #endif /* DHD_LB_RXP */
4698 }
4699
4700 if (pkt_newidx) {
4701 #ifdef DHD_LB_RXP
4702 dhd_lb_rx_pkt_enqueue(dhd, pkt_newidx, if_newidx);
4703 #elif defined(DHD_RX_CHAINING)
4704 dhd_rxchain_frame(dhd, pkt_newidx, if_newidx);
4705 #else
4706 dhd_bus_rx_frame(dhd->bus, pkt_newidx, if_newidx, 1);
4707 #endif /* DHD_LB_RXP */
4708 }
4709
4710 pkt_cnt += pkt_cnt_newidx;
4711
4712 /* Post another set of rxbufs to the device */
4713 dhd_prot_return_rxbuf(dhd, 0, pkt_cnt);
4714
4715 #ifdef DHD_RX_CHAINING
4716 dhd_rxchain_commit(dhd);
4717 #endif // endif
4718
4719 /* After batch processing, check RX bound */
4720 n += pkt_cnt;
4721 if (n >= bound) {
4722 more = TRUE;
4723 break;
4724 }
4725 }
4726
4727 /* Call lb_dispatch only if packets are queued */
4728 if (n) {
4729 DHD_LB_DISPATCH_RX_COMPL(dhd);
4730 DHD_LB_DISPATCH_RX_PROCESS(dhd);
4731 }
4732
4733 return more;
4734 }
4735
4736 /**
4737 * Hands transmit packets (with a caller provided flow_id) over to dongle territory (the flow ring)
4738 */
4739 void
4740 dhd_prot_update_txflowring(dhd_pub_t *dhd, uint16 flowid, void *msgring)
4741 {
4742 msgbuf_ring_t *ring = (msgbuf_ring_t *)msgring;
4743
4744 if (ring == NULL) {
4745 DHD_ERROR(("%s: NULL txflowring. exiting...\n", __FUNCTION__));
4746 return;
4747 }
4748 /* Update read pointer */
4749 if (dhd->dma_d2h_ring_upd_support) {
4750 ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
4751 }
4752
4753 DHD_TRACE(("ringid %d flowid %d write %d read %d \n\n",
4754 ring->idx, flowid, ring->wr, ring->rd));
4755
4756 /* Need more logic here, but for now use it directly */
4757 dhd_bus_schedule_queue(dhd->bus, flowid, TRUE); /* from queue to flowring */
4758 }
4759
4760 /** called when DHD needs to check for 'transmit complete' messages from the dongle */
4761 bool BCMFASTPATH
4762 dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound)
4763 {
4764 bool more = TRUE;
4765 uint n = 0;
4766 msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln;
4767 unsigned long flags;
4768
4769 /* Process all the messages - DTOH direction */
4770 while (!dhd_is_device_removed(dhd)) {
4771 uint8 *msg_addr;
4772 uint32 msg_len;
4773
4774 if (dhd->hang_was_sent) {
4775 more = FALSE;
4776 break;
4777 }
4778
4779 #ifdef DHD_MAP_LOGGING
4780 if (dhd->smmu_fault_occurred) {
4781 more = FALSE;
4782 break;
4783 }
4784 #endif /* DHD_MAP_LOGGING */
4785
4786 DHD_RING_LOCK(ring->ring_lock, flags);
4787 /* Get the address of the next message to be read from ring */
4788 msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
4789 DHD_RING_UNLOCK(ring->ring_lock, flags);
4790
4791 if (msg_addr == NULL) {
4792 more = FALSE;
4793 break;
4794 }
4795
4796 /* Prefetch data to populate the cache */
4797 OSL_PREFETCH(msg_addr);
4798
4799 if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
4800 DHD_ERROR(("%s: process %s msg addr %p len %d\n",
4801 __FUNCTION__, ring->name, msg_addr, msg_len));
4802 }
4803
4804 /* Write to dngl rd ptr */
4805 dhd_prot_upd_read_idx(dhd, ring);
4806
4807 /* After batch processing, check bound */
4808 n += msg_len / ring->item_len;
4809 if (n >= bound) {
4810 break;
4811 }
4812 }
4813
4814 DHD_LB_DISPATCH_TX_COMPL(dhd);
4815
4816 return more;
4817 }
4818
4819 int BCMFASTPATH
4820 dhd_prot_process_trapbuf(dhd_pub_t *dhd)
4821 {
4822 uint32 data;
4823 dhd_dma_buf_t *trap_addr = &dhd->prot->fw_trap_buf;
4824
4825 /* Interrupts can come in before this struct
4826 * has been initialized.
4827 */
4828 if (trap_addr->va == NULL) {
4829 DHD_ERROR(("%s: trap_addr->va is NULL\n", __FUNCTION__));
4830 return 0;
4831 }
4832
4833 OSL_CACHE_INV((void *)trap_addr->va, sizeof(uint32));
4834 data = *(uint32 *)(trap_addr->va);
4835
4836 if (data & D2H_DEV_FWHALT) {
4837 DHD_ERROR(("Firmware trapped and trap_data is 0x%04x\n", data));
4838
4839 if (data & D2H_DEV_EXT_TRAP_DATA)
4840 {
4841 if (dhd->extended_trap_data) {
4842 OSL_CACHE_INV((void *)trap_addr->va,
4843 BCMPCIE_EXT_TRAP_DATA_MAXLEN);
4844 memcpy(dhd->extended_trap_data, (uint32 *)trap_addr->va,
4845 BCMPCIE_EXT_TRAP_DATA_MAXLEN);
4846 }
4847 DHD_ERROR(("Extended trap data available\n"));
4848 }
4849 return data;
4850 }
4851 return 0;
4852 }
4853
4854 /** called when DHD needs to check for 'ioctl complete' messages from the dongle */
4855 int BCMFASTPATH
4856 dhd_prot_process_ctrlbuf(dhd_pub_t *dhd)
4857 {
4858 dhd_prot_t *prot = dhd->prot;
4859 msgbuf_ring_t *ring = &prot->d2hring_ctrl_cpln;
4860 unsigned long flags;
4861
4862 /* Process all the messages - DTOH direction */
4863 while (!dhd_is_device_removed(dhd)) {
4864 uint8 *msg_addr;
4865 uint32 msg_len;
4866
4867 if (dhd->hang_was_sent) {
4868 break;
4869 }
4870
4871 #ifdef DHD_MAP_LOGGING
4872 if (dhd->smmu_fault_occurred) {
4873 break;
4874 }
4875 #endif /* DHD_MAP_LOGGING */
4876
4877 DHD_RING_LOCK(ring->ring_lock, flags);
4878 /* Get the address of the next message to be read from ring */
4879 msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
4880 DHD_RING_UNLOCK(ring->ring_lock, flags);
4881
4882 if (msg_addr == NULL) {
4883 break;
4884 }
4885
4886 /* Prefetch data to populate the cache */
4887 OSL_PREFETCH(msg_addr);
4888 if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
4889 DHD_ERROR(("%s: process %s msg addr %p len %d\n",
4890 __FUNCTION__, ring->name, msg_addr, msg_len));
4891 }
4892
4893 /* Write to dngl rd ptr */
4894 dhd_prot_upd_read_idx(dhd, ring);
4895 }
4896
4897 return 0;
4898 }
4899
4900 /**
4901 * Consume messages out of the D2H ring. Ensure that the message's DMA to host
4902 * memory has completed, before invoking the message handler via a table lookup
4903 * of the cmn_msg_hdr::msg_type.
4904 */
4905 static int BCMFASTPATH
4906 dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len)
4907 {
4908 uint32 buf_len = len;
4909 uint16 item_len;
4910 uint8 msg_type;
4911 cmn_msg_hdr_t *msg = NULL;
4912 int ret = BCME_OK;
4913
4914 ASSERT(ring);
4915 item_len = ring->item_len;
4916 if (item_len == 0) {
4917 DHD_ERROR(("%s: ringidx %d, item_len %d buf_len %d \n",
4918 __FUNCTION__, ring->idx, item_len, buf_len));
4919 return BCME_ERROR;
4920 }
4921
4922 while (buf_len > 0) {
4923 if (dhd->hang_was_sent) {
4924 ret = BCME_ERROR;
4925 goto done;
4926 }
4927
4928 #ifdef DHD_MAP_LOGGING
4929 if (dhd->smmu_fault_occurred) {
4930 ret = BCME_ERROR;
4931 goto done;
4932 }
4933 #endif /* DHD_MAP_LOGGING */
4934
4935 msg = (cmn_msg_hdr_t *)buf;
4936
4937 /* Wait until DMA completes, then fetch msg_type */
4938 msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, item_len);
4939
4940 /*
4941			 * Update curr_rd to the ring index from which the current work item
4942			 * was fetched. This way, if the fetched work item fails the LIVELOCK
4943			 * check, we can print the exact read index in the ring that holds
4944			 * the corrupted work item.
4945 */
4946 if ((ring->curr_rd + 1) >= ring->max_items) {
4947 ring->curr_rd = 0;
4948 } else {
4949 ring->curr_rd += 1;
4950 }
4951
4952 /* Prefetch data to populate the cache */
4953 OSL_PREFETCH(buf + item_len);
4954
4955 DHD_INFO(("msg_type %d item_len %d buf_len %d\n",
4956 msg_type, item_len, buf_len));
4957
4958 if (msg_type == MSG_TYPE_LOOPBACK) {
4959 bcm_print_bytes("LPBK RESP: ", (uint8 *)msg, item_len);
4960 DHD_ERROR((" MSG_TYPE_LOOPBACK, len %d\n", item_len));
4961 }
4962
4963 ASSERT(msg_type < DHD_PROT_FUNCS);
4964 if (msg_type >= DHD_PROT_FUNCS) {
4965 DHD_ERROR(("%s: msg_type %d, item_len %d buf_len %d\n",
4966 __FUNCTION__, msg_type, item_len, buf_len));
4967 ret = BCME_ERROR;
4968 goto done;
4969 }
4970
4971 if (msg_type == MSG_TYPE_INFO_BUF_CMPLT) {
4972 if (ring == dhd->prot->d2hring_info_cpln) {
4973 if (!dhd->prot->infobufpost) {
4974 DHD_ERROR(("infobuf posted are zero,"
4975 "but there is a completion\n"));
4976 goto done;
4977 }
4978 dhd->prot->infobufpost--;
4979 dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn);
4980 dhd_prot_process_infobuf_complete(dhd, buf);
4981 }
4982 } else
4983 if (table_lookup[msg_type]) {
4984 table_lookup[msg_type](dhd, buf);
4985 }
4986
4987 if (buf_len < item_len) {
4988 ret = BCME_ERROR;
4989 goto done;
4990 }
4991 buf_len = buf_len - item_len;
4992 buf = buf + item_len;
4993 }
4994
4995 done:
4996
4997 #ifdef DHD_RX_CHAINING
4998 dhd_rxchain_commit(dhd);
4999 #endif // endif
5000
5001 return ret;
5002 } /* dhd_prot_process_msgtype */
5003
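/*
 * Illustrative sketch, not part of the driver: one way a d2h_sync_cb
 * implementation can confirm that the dongle's DMA of a work item has
 * fully landed in host memory before the item is dispatched. The real
 * callbacks in this file use the SEQNUM/XORCSUM modes reported by
 * dhd_prot_dump(); the struct layout, retry bound and names below are
 * assumptions made for the example only. The real code also re-invalidates
 * the cacheline between retries and accounts livelock statistics.
 */
#if 0
struct sketch_cmn_msg_hdr {
	uint8 msg_type;
	uint8 if_id;
	uint8 flags;
	uint8 epoch;	/* sequence number, written by the dongle */
};

static uint8
sketch_d2h_sync_seqnum(volatile struct sketch_cmn_msg_hdr *hdr, uint8 expected_epoch)
{
	int retries = 1000;	/* bounded spin instead of waiting forever */

	while (retries-- > 0) {
		/* a matching epoch implies the rest of the item is visible */
		if (hdr->epoch == expected_epoch)
			return hdr->msg_type;
	}
	return 0;	/* caller treats 0 as "no valid work item yet" */
}
#endif /* end of illustrative sketch */
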
5004 static void
5005 dhd_prot_noop(dhd_pub_t *dhd, void *msg)
5006 {
5007 return;
5008 }
5009
5010 /** called on MSG_TYPE_RING_STATUS message received from dongle */
5011 static void
5012 dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg)
5013 {
5014 pcie_ring_status_t *ring_status = (pcie_ring_status_t *) msg;
5015 uint32 request_id = ltoh32(ring_status->cmn_hdr.request_id);
5016 uint16 status = ltoh16(ring_status->compl_hdr.status);
5017 uint16 ring_id = ltoh16(ring_status->compl_hdr.flow_ring_id);
5018
5019 DHD_ERROR(("ring status: request_id %d, status 0x%04x, flow ring %d, write_idx %d \n",
5020 request_id, status, ring_id, ltoh16(ring_status->write_idx)));
5021
5022 if (ltoh16(ring_status->compl_hdr.ring_id) != BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT)
5023 return;
5024 if (status == BCMPCIE_BAD_PHASE) {
5025		/* bad phase reported by the dongle */
5026 DHD_ERROR(("Bad phase\n"));
5027 }
5028 if (status != BCMPCIE_BADOPTION)
5029 return;
5030
5031 if (request_id == DHD_H2D_DBGRING_REQ_PKTID) {
5032 if (dhd->prot->h2dring_info_subn != NULL) {
5033 if (dhd->prot->h2dring_info_subn->create_pending == TRUE) {
5034 DHD_ERROR(("H2D ring create failed for info ring\n"));
5035 dhd->prot->h2dring_info_subn->create_pending = FALSE;
5036 }
5037 else
5038 DHD_ERROR(("ring create ID for a ring, create not pending\n"));
5039 } else {
5040 DHD_ERROR(("%s info submit ring doesn't exist\n", __FUNCTION__));
5041 }
5042 }
5043 else if (request_id == DHD_D2H_DBGRING_REQ_PKTID) {
5044 if (dhd->prot->d2hring_info_cpln != NULL) {
5045 if (dhd->prot->d2hring_info_cpln->create_pending == TRUE) {
5046 DHD_ERROR(("D2H ring create failed for info ring\n"));
5047 dhd->prot->d2hring_info_cpln->create_pending = FALSE;
5048 }
5049 else
5050 DHD_ERROR(("ring create ID for info ring, create not pending\n"));
5051 } else {
5052 DHD_ERROR(("%s info cpl ring doesn't exist\n", __FUNCTION__));
5053 }
5054 }
5055 else {
5056 DHD_ERROR(("don;t know how to pair with original request\n"));
5057 }
5058 /* How do we track this to pair it with ??? */
5059 return;
5060 }
5061
5062 /** called on MSG_TYPE_GEN_STATUS ('general status') message received from dongle */
5063 static void
5064 dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg)
5065 {
5066 pcie_gen_status_t *gen_status = (pcie_gen_status_t *)msg;
5067 DHD_ERROR(("ERROR: gen status: request_id %d, STATUS 0x%04x, flow ring %d \n",
5068 gen_status->cmn_hdr.request_id, gen_status->compl_hdr.status,
5069 gen_status->compl_hdr.flow_ring_id));
5070
5071 /* How do we track this to pair it with ??? */
5072 return;
5073 }
5074
5075 /**
5076 * Called on MSG_TYPE_IOCTLPTR_REQ_ACK ('ioctl ack') message received from dongle, meaning that the
5077 * dongle received the ioctl message in dongle memory.
5078 */
5079 static void
5080 dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg)
5081 {
5082 ioctl_req_ack_msg_t *ioct_ack = (ioctl_req_ack_msg_t *)msg;
5083 unsigned long flags;
5084 #if defined(DHD_PKTID_AUDIT_RING)
5085 uint32 pktid = ltoh32(ioct_ack->cmn_hdr.request_id);
5086 #endif // endif
5087
5088 #if defined(DHD_PKTID_AUDIT_RING)
5089	/* Skip audit for DHD_IOCTL_REQ_PKTID = 0xFFFE */
5090 if (pktid != DHD_IOCTL_REQ_PKTID) {
5091 #ifndef IOCTLRESP_USE_CONSTMEM
5092 DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
5093 DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
5094 #else
5095 DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_map_handle_ioctl, pktid,
5096 DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
5097 #endif /* !IOCTLRESP_USE_CONSTMEM */
5098 }
5099 #endif // endif
5100
5101 dhd->prot->ioctl_ack_time = OSL_LOCALTIME_NS();
5102
5103 DHD_GENERAL_LOCK(dhd, flags);
5104 if ((dhd->prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) &&
5105 (dhd->prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
5106 dhd->prot->ioctl_state &= ~MSGBUF_IOCTL_ACK_PENDING;
5107 } else {
5108 DHD_ERROR(("%s: received ioctl ACK with state %02x trans_id = %d\n",
5109 __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
5110 prhex("dhd_prot_ioctack_process:",
5111 (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
5112 }
5113 DHD_GENERAL_UNLOCK(dhd, flags);
5114
5115 DHD_CTL(("ioctl req ack: request_id %d, status 0x%04x, flow ring %d \n",
5116 ioct_ack->cmn_hdr.request_id, ioct_ack->compl_hdr.status,
5117 ioct_ack->compl_hdr.flow_ring_id));
5118 if (ioct_ack->compl_hdr.status != 0) {
5119 DHD_ERROR(("got an error status for the ioctl request...need to handle that\n"));
5120 }
5121 }
5122
5123 /** called on MSG_TYPE_IOCTL_CMPLT message received from dongle */
5124 static void
5125 dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg)
5126 {
5127 dhd_prot_t *prot = dhd->prot;
5128 uint32 pkt_id, xt_id;
5129 ioctl_comp_resp_msg_t *ioct_resp = (ioctl_comp_resp_msg_t *)msg;
5130 void *pkt;
5131 unsigned long flags;
5132 dhd_dma_buf_t retbuf;
5133
5134 memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
5135
5136 pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id);
5137
5138 #if defined(DHD_PKTID_AUDIT_RING)
5139 #ifndef IOCTLRESP_USE_CONSTMEM
5140 DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_ctrl_map, pkt_id,
5141 DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
5142 #else
5143 DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_map_handle_ioctl, pkt_id,
5144 DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
5145 #endif /* !IOCTLRESP_USE_CONSTMEM */
5146 #endif // endif
5147
5148 DHD_GENERAL_LOCK(dhd, flags);
5149 if ((prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) ||
5150 !(prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
5151 DHD_ERROR(("%s: received ioctl response with state %02x trans_id = %d\n",
5152 __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
5153 prhex("dhd_prot_ioctcmplt_process:",
5154 (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
5155 DHD_GENERAL_UNLOCK(dhd, flags);
5156 return;
5157 }
5158
5159 dhd->prot->ioctl_cmplt_time = OSL_LOCALTIME_NS();
5160
5161 /* Clear Response pending bit */
5162 prot->ioctl_state &= ~MSGBUF_IOCTL_RESP_PENDING;
5163 DHD_GENERAL_UNLOCK(dhd, flags);
5164
5165 #ifndef IOCTLRESP_USE_CONSTMEM
5166 pkt = dhd_prot_packet_get(dhd, pkt_id, PKTTYPE_IOCTL_RX, TRUE);
5167 #else
5168 dhd_prot_ioctl_ret_buffer_get(dhd, pkt_id, &retbuf);
5169 pkt = retbuf.va;
5170 #endif /* !IOCTLRESP_USE_CONSTMEM */
5171 if (!pkt) {
5172 DHD_ERROR(("%s: received ioctl response with NULL pkt\n", __FUNCTION__));
5173 prhex("dhd_prot_ioctcmplt_process:",
5174 (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
5175 return;
5176 }
5177
5178 prot->ioctl_resplen = ltoh16(ioct_resp->resp_len);
5179 prot->ioctl_status = ltoh16(ioct_resp->compl_hdr.status);
5180 xt_id = ltoh16(ioct_resp->trans_id);
5181
5182 if (xt_id != prot->ioctl_trans_id || prot->curr_ioctl_cmd != ioct_resp->cmd) {
5183 DHD_ERROR(("%s: transaction id(%d %d) or cmd(%d %d) mismatch\n",
5184 __FUNCTION__, xt_id, prot->ioctl_trans_id,
5185 prot->curr_ioctl_cmd, ioct_resp->cmd));
5186 dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_ERROR);
5187 dhd_prot_debug_info_print(dhd);
5188 #ifdef DHD_FW_COREDUMP
5189 if (dhd->memdump_enabled) {
5190 /* collect core dump */
5191 dhd->memdump_type = DUMP_TYPE_TRANS_ID_MISMATCH;
5192 dhd_bus_mem_dump(dhd);
5193 }
5194 #else
5195 ASSERT(0);
5196 #endif /* DHD_FW_COREDUMP */
5197 dhd_schedule_reset(dhd);
5198 goto exit;
5199 }
5200 DHD_CTL(("IOCTL_COMPLETE: req_id %x transid %d status %x resplen %d\n",
5201 pkt_id, xt_id, prot->ioctl_status, prot->ioctl_resplen));
5202
5203 if (prot->ioctl_resplen > 0) {
5204 #ifndef IOCTLRESP_USE_CONSTMEM
5205 bcopy(PKTDATA(dhd->osh, pkt), prot->retbuf.va, prot->ioctl_resplen);
5206 #else
5207 bcopy(pkt, prot->retbuf.va, prot->ioctl_resplen);
5208 #endif /* !IOCTLRESP_USE_CONSTMEM */
5209 }
5210
5211 /* wake up any dhd_os_ioctl_resp_wait() */
5212 dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_SUCCESS);
5213
5214 exit:
5215 #ifndef IOCTLRESP_USE_CONSTMEM
5216 dhd_prot_packet_free(dhd, pkt,
5217 PKTTYPE_IOCTL_RX, FALSE);
5218 #else
5219 free_ioctl_return_buffer(dhd, &retbuf);
5220 #endif /* !IOCTLRESP_USE_CONSTMEM */
5221
5222 /* Post another ioctl buf to the device */
5223 if (prot->cur_ioctlresp_bufs_posted > 0) {
5224 prot->cur_ioctlresp_bufs_posted--;
5225 }
5226
5227 dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
5228 }
5229
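/*
 * Illustrative sketch, not part of the driver: the expected life cycle of
 * the MSGBUF_IOCTL_ACK_PENDING / MSGBUF_IOCTL_RESP_PENDING state bits
 * checked in the two handlers above. Bit values and helper names are
 * assumptions made for the example only.
 */
#if 0
#define SKETCH_ACK_PENDING	0x1
#define SKETCH_RESP_PENDING	0x2

static int sketch_ioctl_state;

static void sketch_ioctl_submit(void)
{
	/* both bits set when the request is pushed to the control ring */
	sketch_ioctl_state = SKETCH_ACK_PENDING | SKETCH_RESP_PENDING;
}

static void sketch_ioctl_ack(void)
{
	/* MSG_TYPE_IOCTLPTR_REQ_ACK: dongle has pulled the request */
	sketch_ioctl_state &= ~SKETCH_ACK_PENDING;
}

static void sketch_ioctl_complete(void)
{
	/* MSG_TYPE_IOCTL_CMPLT: response payload is in the return buffer */
	sketch_ioctl_state &= ~SKETCH_RESP_PENDING;
	/* at this point the waiter in dhd_msgbuf_wait_ioctl_cmplt() is woken */
}
#endif /* end of illustrative sketch */
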
5230 /** called on MSG_TYPE_TX_STATUS message received from dongle */
5231 static void BCMFASTPATH
5232 dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg)
5233 {
5234 dhd_prot_t *prot = dhd->prot;
5235 host_txbuf_cmpl_t * txstatus;
5236 unsigned long flags;
5237 uint32 pktid;
5238 void *pkt;
5239 dmaaddr_t pa;
5240 uint32 len;
5241 void *dmah;
5242 void *secdma;
5243 bool pkt_fate;
5244 msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln;
5245 #ifdef TX_STATUS_LATENCY_STATS
5246 flow_info_t *flow_info;
5247 uint64 tx_status_latency;
5248 #endif /* TX_STATUS_LATENCY_STATS */
5249 #if defined(TX_STATUS_LATENCY_STATS)
5250 flow_ring_node_t *flow_ring_node;
5251 uint16 flowid;
5252 #endif // endif
5253
5254 txstatus = (host_txbuf_cmpl_t *)msg;
5255 #if defined(TX_STATUS_LATENCY_STATS)
5256 flowid = txstatus->compl_hdr.flow_ring_id;
5257 flow_ring_node = DHD_FLOW_RING(dhd, flowid);
5258 #endif // endif
5259
5260 /* locks required to protect circular buffer accesses */
5261 DHD_RING_LOCK(ring->ring_lock, flags);
5262 pktid = ltoh32(txstatus->cmn_hdr.request_id);
5263 pkt_fate = TRUE;
5264
5265 #if defined(DHD_PKTID_AUDIT_RING)
5266 DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_tx_map, pktid,
5267 DHD_DUPLICATE_FREE, msg, D2HRING_TXCMPLT_ITEMSIZE);
5268 #endif // endif
5269
5270 DHD_INFO(("txstatus for pktid 0x%04x\n", pktid));
5271 if (OSL_ATOMIC_DEC_RETURN(dhd->osh, &prot->active_tx_count) < 0) {
5272 DHD_ERROR(("Extra packets are freed\n"));
5273 }
5274 ASSERT(pktid != 0);
5275
5276 pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
5277 pa, len, dmah, secdma, PKTTYPE_DATA_TX);
5278 if (!pkt) {
5279 DHD_RING_UNLOCK(ring->ring_lock, flags);
5280 DHD_ERROR(("%s: received txstatus with NULL pkt\n", __FUNCTION__));
5281 prhex("dhd_prot_txstatus_process:", (uchar *)msg, D2HRING_TXCMPLT_ITEMSIZE);
5282 #ifdef DHD_FW_COREDUMP
5283 if (dhd->memdump_enabled) {
5284 /* collect core dump */
5285 dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
5286 dhd_bus_mem_dump(dhd);
5287 }
5288 #else
5289 ASSERT(0);
5290 #endif /* DHD_FW_COREDUMP */
5291 return;
5292 }
5293
5294 if (SECURE_DMA_ENAB(dhd->osh)) {
5295 int offset = 0;
5296 BCM_REFERENCE(offset);
5297
5298 if (dhd->prot->tx_metadata_offset)
5299 offset = dhd->prot->tx_metadata_offset + ETHER_HDR_LEN;
5300 SECURE_DMA_UNMAP(dhd->osh, (uint) pa,
5301 (uint) dhd->prot->tx_metadata_offset, DMA_RX, 0, dmah,
5302 secdma, offset);
5303 } else {
5304 DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
5305 }
5306
5307 #ifdef TX_STATUS_LATENCY_STATS
5308 /* update the tx status latency for flowid */
5309 flow_info = &flow_ring_node->flow_info;
5310 tx_status_latency = OSL_SYSUPTIME_US() - DHD_PKT_GET_QTIME(pkt);
5311 flow_info->cum_tx_status_latency += tx_status_latency;
5312 flow_info->num_tx_status++;
5313 #endif /* TX_STATUS_LATENCY_STATS */
5314 #if defined(DHD_LB_TXC) && !defined(BCM_SECURE_DMA)
5315 {
5316 int elem_ix;
5317 void **elem;
5318 bcm_workq_t *workq;
5319
5320 workq = &prot->tx_compl_prod;
5321 /*
5322 * Produce the packet into the tx_compl workq for the tx compl tasklet
5323 * to consume.
5324 */
5325 OSL_PREFETCH(PKTTAG(pkt));
5326
5327 /* fetch next available slot in workq */
5328 elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
5329
5330 DHD_PKTTAG_SET_PA((dhd_pkttag_fr_t *)PKTTAG(pkt), pa);
5331 DHD_PKTTAG_SET_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt), len);
5332
5333 if (elem_ix == BCM_RING_FULL) {
5334 DHD_ERROR(("tx_compl_prod BCM_RING_FULL\n"));
5335 goto workq_ring_full;
5336 }
5337
5338 elem = WORKQ_ELEMENT(void *, &prot->tx_compl_prod, elem_ix);
5339 *elem = pkt;
5340
5341 smp_wmb();
5342
5343 /* Sync WR index to consumer if the SYNC threshold has been reached */
5344 if (++prot->tx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) {
5345 bcm_workq_prod_sync(workq);
5346 prot->tx_compl_prod_sync = 0;
5347 }
5348
5349 DHD_INFO(("%s: tx_compl_prod pkt<%p> sync<%d>\n",
5350 __FUNCTION__, pkt, prot->tx_compl_prod_sync));
5351
5352 DHD_RING_UNLOCK(ring->ring_lock, flags);
5353 return;
5354 }
5355
5356 workq_ring_full:
5357
5358	#endif /* DHD_LB_TXC && !BCM_SECURE_DMA */
5359
5360 #ifdef DMAMAP_STATS
5361 dhd->dma_stats.txdata--;
5362 dhd->dma_stats.txdata_sz -= len;
5363 #endif /* DMAMAP_STATS */
5364 pkt_fate = dhd_dbg_process_tx_status(dhd, pkt, pktid,
5365 ltoh16(txstatus->compl_hdr.status) & WLFC_CTL_PKTFLAG_MASK);
5366 #ifdef DHD_PKT_LOGGING
5367 if (dhd->d11_tx_status) {
5368 DHD_PKTLOG_TXS(dhd, pkt, pktid,
5369 ltoh16(txstatus->compl_hdr.status) & WLFC_CTL_PKTFLAG_MASK);
5370 }
5371 #endif /* DHD_PKT_LOGGING */
5372 #if defined(BCMPCIE)
5373 dhd_txcomplete(dhd, pkt, pkt_fate);
5374 #endif // endif
5375
5376 #if DHD_DBG_SHOW_METADATA
5377 if (dhd->prot->metadata_dbg &&
5378 dhd->prot->tx_metadata_offset && txstatus->metadata_len) {
5379 uchar *ptr;
5380 /* The Ethernet header of TX frame was copied and removed.
5381 * Here, move the data pointer forward by Ethernet header size.
5382 */
5383 PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN);
5384 ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->tx_metadata_offset);
5385 bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len);
5386 dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len);
5387 }
5388 #endif /* DHD_DBG_SHOW_METADATA */
5389
5390 #ifdef DHD_LBUF_AUDIT
5391 PKTAUDIT(dhd->osh, pkt);
5392 #endif // endif
5393
5394 DHD_RING_UNLOCK(ring->ring_lock, flags);
5395 PKTFREE(dhd->osh, pkt, TRUE);
5396 DHD_RING_LOCK(ring->ring_lock, flags);
5397 DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id,
5398 txstatus->tx_status);
5399 DHD_RING_UNLOCK(ring->ring_lock, flags);
5400
5401 return;
5402 } /* dhd_prot_txstatus_process */
5403
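/*
 * Illustrative sketch, not part of the driver: how the per-flow counters
 * updated above under TX_STATUS_LATENCY_STATS (cum_tx_status_latency,
 * num_tx_status) translate into an average tx-status latency in
 * microseconds. The helper name is an assumption for the example.
 */
#if 0
static uint64
sketch_avg_txstatus_latency_us(uint64 cum_latency_us, uint64 num_tx_status)
{
	if (num_tx_status == 0)
		return 0;
	return cum_latency_us / num_tx_status;
}
#endif /* end of illustrative sketch */
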
5404 /** called on MSG_TYPE_WL_EVENT message received from dongle */
5405 static void
5406 dhd_prot_event_process(dhd_pub_t *dhd, void *msg)
5407 {
5408 wlevent_req_msg_t *evnt;
5409 uint32 bufid;
5410 uint16 buflen;
5411 int ifidx = 0;
5412 void* pkt;
5413 dhd_prot_t *prot = dhd->prot;
5414
5415 /* Event complete header */
5416 evnt = (wlevent_req_msg_t *)msg;
5417 bufid = ltoh32(evnt->cmn_hdr.request_id);
5418
5419 #if defined(DHD_PKTID_AUDIT_RING)
5420 DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, bufid,
5421 DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
5422 #endif // endif
5423
5424 buflen = ltoh16(evnt->event_data_len);
5425
5426 ifidx = BCMMSGBUF_API_IFIDX(&evnt->cmn_hdr);
5427
5428 /* Post another rxbuf to the device */
5429 if (prot->cur_event_bufs_posted)
5430 prot->cur_event_bufs_posted--;
5431 dhd_msgbuf_rxbuf_post_event_bufs(dhd);
5432
5433 pkt = dhd_prot_packet_get(dhd, bufid, PKTTYPE_EVENT_RX, TRUE);
5434
5435 if (!pkt) {
5436 DHD_ERROR(("%s: pkt is NULL for pktid %d\n", __FUNCTION__, bufid));
5437 return;
5438 }
5439
5440 /* DMA RX offset updated through shared area */
5441 if (dhd->prot->rx_dataoffset)
5442 PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
5443
5444 PKTSETLEN(dhd->osh, pkt, buflen);
5445 #ifdef DHD_LBUF_AUDIT
5446 PKTAUDIT(dhd->osh, pkt);
5447 #endif // endif
5448 dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
5449 }
5450
5451 /** called on MSG_TYPE_INFO_BUF_CMPLT message received from dongle */
5452 static void BCMFASTPATH
5453 dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf)
5454 {
5455 info_buf_resp_t *resp;
5456 uint32 pktid;
5457 uint16 buflen;
5458 void * pkt;
5459
5460 resp = (info_buf_resp_t *)buf;
5461 pktid = ltoh32(resp->cmn_hdr.request_id);
5462 buflen = ltoh16(resp->info_data_len);
5463
5464 #ifdef DHD_PKTID_AUDIT_RING
5465 DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
5466 DHD_DUPLICATE_FREE, buf, D2HRING_INFO_BUFCMPLT_ITEMSIZE);
5467 #endif /* DHD_PKTID_AUDIT_RING */
5468
5469 DHD_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d, rx_dataoffset %d\n",
5470 pktid, buflen, resp->cmn_hdr.flags, ltoh16(resp->seqnum),
5471 dhd->prot->rx_dataoffset));
5472
5473 pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_INFO_RX, TRUE);
5474 if (!pkt)
5475 return;
5476
5477 /* DMA RX offset updated through shared area */
5478 if (dhd->prot->rx_dataoffset)
5479 PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
5480
5481 PKTSETLEN(dhd->osh, pkt, buflen);
5482
5483 #ifdef DHD_LBUF_AUDIT
5484 PKTAUDIT(dhd->osh, pkt);
5485 #endif // endif
5486
5487	/* info ring "debug" data, which is not an 802.3 frame, is tagged with a
5488	 * special ifidx of -1. This is internal to dhd and is only used to route
5489	 * the data to dhd_linux.c:dhd_rx_frame() from here (dhd_prot_process_infobuf_complete).
5490	 */
5491 dhd_bus_rx_frame(dhd->bus, pkt, DHD_DUMMY_INFO_IF /* ifidx HACK */, 1);
5492 }
5493
5494 /** called on MSG_TYPE_SNAPSHOT_CMPLT message received from dongle */
5495 static void BCMFASTPATH
5496 dhd_prot_process_snapshot_complete(dhd_pub_t *dhd, void *buf)
5497 {
5498 }
5499
5500 /** Stop protocol: sync w/dongle state. */
5501 void dhd_prot_stop(dhd_pub_t *dhd)
5502 {
5503 ASSERT(dhd);
5504 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5505
5506 }
5507
5508 /* Add any protocol-specific data header.
5509 * Caller must reserve prot_hdrlen prepend space.
5510 */
5511 void BCMFASTPATH
5512 dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *PKTBUF)
5513 {
5514 return;
5515 }
5516
5517 uint
5518 dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF)
5519 {
5520 return 0;
5521 }
5522
5523 #define MAX_MTU_SZ (1600u)
5524
5525 #define PKTBUF pktbuf
5526
5527 /**
5528 * Called when a tx ethernet packet has been dequeued from a flow queue, and has to be inserted in
5529 * the corresponding flow ring.
5530 */
5531 int BCMFASTPATH
5532 dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx)
5533 {
5534 unsigned long flags;
5535 dhd_prot_t *prot = dhd->prot;
5536 host_txbuf_post_t *txdesc = NULL;
5537 dmaaddr_t pa, meta_pa;
5538 uint8 *pktdata;
5539 uint32 pktlen;
5540 uint32 pktid;
5541 uint8 prio;
5542 uint16 flowid = 0;
5543 uint16 alloced = 0;
5544 uint16 headroom;
5545 msgbuf_ring_t *ring;
5546 flow_ring_table_t *flow_ring_table;
5547 flow_ring_node_t *flow_ring_node;
5548
5549 if (dhd->flow_ring_table == NULL) {
5550 return BCME_NORESOURCE;
5551 }
5552
5553 flowid = DHD_PKT_GET_FLOWID(PKTBUF);
5554 flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
5555 flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
5556
5557 ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
5558
5559 DHD_RING_LOCK(ring->ring_lock, flags);
5560
5561 /* Create a unique 32-bit packet id */
5562 pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, dhd->prot->pktid_tx_map,
5563 PKTBUF, PKTTYPE_DATA_TX);
5564 #if defined(DHD_PCIE_PKTID)
5565 if (pktid == DHD_PKTID_INVALID) {
5566 DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
5567 /*
5568 * If we return error here, the caller would queue the packet
5569 * again. So we'll just free the skb allocated in DMA Zone.
5570 * Since we have not freed the original SKB yet the caller would
5571 * requeue the same.
5572 */
5573 goto err_no_res_pktfree;
5574 }
5575 #endif /* DHD_PCIE_PKTID */
5576
5577 /* Reserve space in the circular buffer */
5578 txdesc = (host_txbuf_post_t *)
5579 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
5580 if (txdesc == NULL) {
5581 DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n",
5582 __FUNCTION__, __LINE__, OSL_ATOMIC_READ(dhd->osh, &prot->active_tx_count)));
5583 goto err_free_pktid;
5584 }
5585
5586 DHD_DBG_PKT_MON_TX(dhd, PKTBUF, pktid);
5587 #ifdef DHD_PKT_LOGGING
5588 DHD_PKTLOG_TX(dhd, PKTBUF, pktid);
5589 #endif /* DHD_PKT_LOGGING */
5590
5591 /* Extract the data pointer and length information */
5592 pktdata = PKTDATA(dhd->osh, PKTBUF);
5593 pktlen = PKTLEN(dhd->osh, PKTBUF);
5594
5595 /* Ethernet header: Copy before we cache flush packet using DMA_MAP */
5596 bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN);
5597
5598 /* Extract the ethernet header and adjust the data pointer and length */
5599 pktdata = PKTPULL(dhd->osh, PKTBUF, ETHER_HDR_LEN);
5600 pktlen -= ETHER_HDR_LEN;
5601
5602 /* Map the data pointer to a DMA-able address */
5603 if (SECURE_DMA_ENAB(dhd->osh)) {
5604 int offset = 0;
5605 BCM_REFERENCE(offset);
5606
5607 if (prot->tx_metadata_offset)
5608 offset = prot->tx_metadata_offset + ETHER_HDR_LEN;
5609
5610 pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen,
5611 DMA_TX, PKTBUF, 0, ring->dma_buf.secdma, offset);
5612 }
5613 #ifndef BCM_SECURE_DMA
5614 else
5615 pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0);
5616 #endif /* #ifndef BCM_SECURE_DMA */
5617
5618 if (PHYSADDRISZERO(pa)) {
5619 DHD_ERROR(("%s: Something really bad, unless 0 is "
5620 "a valid phyaddr for pa\n", __FUNCTION__));
5621 ASSERT(0);
5622 goto err_rollback_idx;
5623 }
5624
5625 #ifdef DMAMAP_STATS
5626 dhd->dma_stats.txdata++;
5627 dhd->dma_stats.txdata_sz += pktlen;
5628 #endif /* DMAMAP_STATS */
5629 /* No need to lock. Save the rest of the packet's metadata */
5630 DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_tx_map, PKTBUF, pktid,
5631 pa, pktlen, DMA_TX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_TX);
5632
5633 #ifdef TXP_FLUSH_NITEMS
5634 if (ring->pend_items_count == 0)
5635 ring->start_addr = (void *)txdesc;
5636 ring->pend_items_count++;
5637 #endif // endif
5638
5639 /* Form the Tx descriptor message buffer */
5640
5641 /* Common message hdr */
5642 txdesc->cmn_hdr.msg_type = MSG_TYPE_TX_POST;
5643 txdesc->cmn_hdr.if_id = ifidx;
5644 txdesc->cmn_hdr.flags = ring->current_phase;
5645
5646 txdesc->flags = BCMPCIE_PKT_FLAGS_FRAME_802_3;
5647 prio = (uint8)PKTPRIO(PKTBUF);
5648
5649 txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT;
5650 txdesc->seg_cnt = 1;
5651
5652 txdesc->data_len = htol16((uint16) pktlen);
5653 txdesc->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
5654 txdesc->data_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
5655
5656 /* Move data pointer to keep ether header in local PKTBUF for later reference */
5657 PKTPUSH(dhd->osh, PKTBUF, ETHER_HDR_LEN);
5658
5659 /* Handle Tx metadata */
5660 headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF);
5661 if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset))
5662 DHD_ERROR(("No headroom for Metadata tx %d %d\n",
5663 prot->tx_metadata_offset, headroom));
5664
5665 if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) {
5666 DHD_TRACE(("Metadata in tx %d\n", prot->tx_metadata_offset));
5667
5668 /* Adjust the data pointer to account for meta data in DMA_MAP */
5669 PKTPUSH(dhd->osh, PKTBUF, prot->tx_metadata_offset);
5670
5671 if (SECURE_DMA_ENAB(dhd->osh)) {
5672 meta_pa = SECURE_DMA_MAP_TXMETA(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
5673 prot->tx_metadata_offset + ETHER_HDR_LEN, DMA_RX, PKTBUF,
5674 0, ring->dma_buf.secdma);
5675 }
5676 #ifndef BCM_SECURE_DMA
5677 else
5678 meta_pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
5679 prot->tx_metadata_offset, DMA_RX, PKTBUF, 0);
5680 #endif /* #ifndef BCM_SECURE_DMA */
5681
5682 if (PHYSADDRISZERO(meta_pa)) {
5683 /* Unmap the data pointer to a DMA-able address */
5684 if (SECURE_DMA_ENAB(dhd->osh)) {
5685 int offset = 0;
5686 BCM_REFERENCE(offset);
5687
5688 if (prot->tx_metadata_offset) {
5689 offset = prot->tx_metadata_offset + ETHER_HDR_LEN;
5690 }
5691
5692 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen,
5693 DMA_TX, 0, DHD_DMAH_NULL, ring->dma_buf.secdma, offset);
5694 }
5695 #ifndef BCM_SECURE_DMA
5696 else {
5697 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_TX, 0, DHD_DMAH_NULL);
5698 }
5699 #endif /* #ifndef BCM_SECURE_DMA */
5700 #ifdef TXP_FLUSH_NITEMS
5701 /* update pend_items_count */
5702 ring->pend_items_count--;
5703 #endif /* TXP_FLUSH_NITEMS */
5704
5705 DHD_ERROR(("%s: Something really bad, unless 0 is "
5706 "a valid phyaddr for meta_pa\n", __FUNCTION__));
5707 ASSERT(0);
5708 goto err_rollback_idx;
5709 }
5710
5711 /* Adjust the data pointer back to original value */
5712 PKTPULL(dhd->osh, PKTBUF, prot->tx_metadata_offset);
5713
5714 txdesc->metadata_buf_len = prot->tx_metadata_offset;
5715 txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_pa));
5716 txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_pa));
5717 } else {
5718 txdesc->metadata_buf_len = htol16(0);
5719 txdesc->metadata_buf_addr.high_addr = 0;
5720 txdesc->metadata_buf_addr.low_addr = 0;
5721 }
5722
5723 #ifdef DHD_PKTID_AUDIT_RING
5724 DHD_PKTID_AUDIT(dhd, prot->pktid_tx_map, pktid, DHD_DUPLICATE_ALLOC);
5725 #endif /* DHD_PKTID_AUDIT_RING */
5726
5727 txdesc->cmn_hdr.request_id = htol32(pktid);
5728
5729 DHD_TRACE(("txpost: data_len %d, pktid 0x%04x\n", txdesc->data_len,
5730 txdesc->cmn_hdr.request_id));
5731
5732 #ifdef DHD_LBUF_AUDIT
5733 PKTAUDIT(dhd->osh, PKTBUF);
5734 #endif // endif
5735
5736 if (pktlen > MAX_MTU_SZ) {
5737 DHD_ERROR(("%s: ######## pktlen(%d) > MAX_MTU_SZ(%d) #######\n",
5738 __FUNCTION__, pktlen, MAX_MTU_SZ));
5739 dhd_prhex("txringitem", (volatile uchar*)txdesc,
5740 sizeof(host_txbuf_post_t), DHD_ERROR_VAL);
5741 }
5742
5743 /* Update the write pointer in TCM & ring bell */
5744 #ifdef TXP_FLUSH_NITEMS
5745 /* Flush if we have either hit the txp_threshold or if this msg is */
5746 /* occupying the last slot in the flow_ring - before wrap around. */
5747 if ((ring->pend_items_count == prot->txp_threshold) ||
5748 ((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) {
5749 dhd_prot_txdata_write_flush(dhd, flowid);
5750 }
5751 #else
5752 /* update ring's WR index and ring doorbell to dongle */
5753 dhd_prot_ring_write_complete(dhd, ring, txdesc, 1);
5754 #endif // endif
5755 #ifdef TX_STATUS_LATENCY_STATS
5756 /* set the time when pkt is queued to flowring */
5757 DHD_PKT_SET_QTIME(PKTBUF, OSL_SYSUPTIME_US());
5758 #endif /* TX_STATUS_LATENCY_STATS */
5759
5760 OSL_ATOMIC_INC(dhd->osh, &prot->active_tx_count);
5761 /*
5762	 * Take a wake lock; do not sleep if we have at least one packet
5763 * to finish.
5764 */
5765 DHD_TXFL_WAKE_LOCK_TIMEOUT(dhd, MAX_TX_TIMEOUT);
5766
5767 DHD_RING_UNLOCK(ring->ring_lock, flags);
5768
5769 return BCME_OK;
5770
5771 err_rollback_idx:
5772 /* roll back write pointer for unprocessed message */
5773 if (ring->wr == 0) {
5774 ring->wr = ring->max_items - 1;
5775 } else {
5776 ring->wr--;
5777 if (ring->wr == 0) {
5778 DHD_INFO(("%s: flipping the phase now\n", ring->name));
5779 ring->current_phase = ring->current_phase ?
5780 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
5781 }
5782 }
5783
5784 err_free_pktid:
5785 #if defined(DHD_PCIE_PKTID)
5786 {
5787 void *dmah;
5788 void *secdma;
5789 /* Free up the PKTID. physaddr and pktlen will be garbage. */
5790 DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
5791 pa, pktlen, dmah, secdma, PKTTYPE_NO_CHECK);
5792 }
5793
5794 err_no_res_pktfree:
5795 #endif /* DHD_PCIE_PKTID */
5796
5797 DHD_RING_UNLOCK(ring->ring_lock, flags);
5798
5799 return BCME_NORESOURCE;
5800 } /* dhd_prot_txdata */
5801
5802 /* called with a ring_lock */
5803 /** optimization to write "n" tx items at a time to ring */
5804 void BCMFASTPATH
5805 dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flowid)
5806 {
5807 #ifdef TXP_FLUSH_NITEMS
5808 flow_ring_table_t *flow_ring_table;
5809 flow_ring_node_t *flow_ring_node;
5810 msgbuf_ring_t *ring;
5811
5812 if (dhd->flow_ring_table == NULL) {
5813 return;
5814 }
5815
5816 flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
5817 flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
5818 ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
5819
5820 if (ring->pend_items_count) {
5821 /* update ring's WR index and ring doorbell to dongle */
5822 dhd_prot_ring_write_complete(dhd, ring, ring->start_addr,
5823 ring->pend_items_count);
5824 ring->pend_items_count = 0;
5825 ring->start_addr = NULL;
5826 }
5827 #endif /* TXP_FLUSH_NITEMS */
5828 }
5829
5830 #undef PKTBUF /* Only defined in the above routine */
5831
5832 int BCMFASTPATH
5833 dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pkt, uchar *buf, uint *len)
5834 {
5835 return 0;
5836 }
5837
5838 /** called when the dongle returns rx buffers; accounts for them and posts replacement rx buffers */
5839 static void BCMFASTPATH
5840 dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt)
5841 {
5842 dhd_prot_t *prot = dhd->prot;
5843 #if defined(DHD_LB_RXC)
5844 int elem_ix;
5845 uint32 *elem;
5846 bcm_workq_t *workq;
5847
5848 workq = &prot->rx_compl_prod;
5849
5850 /* Produce the work item */
5851 elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
5852 if (elem_ix == BCM_RING_FULL) {
5853 DHD_ERROR(("%s LB RxCompl workQ is full\n", __FUNCTION__));
5854 ASSERT(0);
5855 return;
5856 }
5857
5858 elem = WORKQ_ELEMENT(uint32, workq, elem_ix);
5859 *elem = pktid;
5860
5861 smp_wmb();
5862
5863 /* Sync WR index to consumer if the SYNC threshold has been reached */
5864 if (++prot->rx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) {
5865 bcm_workq_prod_sync(workq);
5866 prot->rx_compl_prod_sync = 0;
5867 }
5868
5869 DHD_INFO(("%s: rx_compl_prod pktid<%u> sync<%d>\n",
5870 __FUNCTION__, pktid, prot->rx_compl_prod_sync));
5871
5872 #endif /* DHD_LB_RXC */
5873
5874 if (prot->rxbufpost >= rxcnt) {
5875 prot->rxbufpost -= (uint16)rxcnt;
5876 } else {
5877 /* ASSERT(0); */
5878 prot->rxbufpost = 0;
5879 }
5880
5881 #if !defined(DHD_LB_RXC)
5882 if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD))
5883 dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
5884 #endif /* !DHD_LB_RXC */
5885 return;
5886 }
5887
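/*
 * Illustrative sketch, not part of the driver: the producer pattern used by
 * the DHD_LB completion paths above - reserve a slot in a bounded ring,
 * write the element, order the write with a barrier, and only publish the
 * producer index once a sync threshold is reached so the consumer is not
 * poked for every single element. Structure and names are assumptions for
 * the example; the real code uses bcm_ring/bcm_workq.
 */
#if 0
#define SKETCH_WORKQ_SZ		8192
#define SKETCH_WORKQ_SYNC	16

struct sketch_workq {
	uint32 slots[SKETCH_WORKQ_SZ];
	uint32 prod;		/* private to producer until published */
	uint32 prod_published;	/* index visible to the consumer */
	uint32 cons;
	uint32 pending_sync;
};

static int
sketch_workq_produce(struct sketch_workq *q, uint32 pktid)
{
	uint32 next = (q->prod + 1) % SKETCH_WORKQ_SZ;

	if (next == q->cons)
		return -1;	/* ring full, caller handles the overflow */

	q->slots[q->prod] = pktid;
	smp_wmb();		/* element must be visible before the index */
	q->prod = next;

	if (++q->pending_sync >= SKETCH_WORKQ_SYNC) {
		q->prod_published = q->prod;	/* consumer may now drain */
		q->pending_sync = 0;
	}
	return 0;
}
#endif /* end of illustrative sketch */
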
5888 /* called before an ioctl is sent to the dongle */
5889 static void
5890 dhd_prot_wlioctl_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf)
5891 {
5892 dhd_prot_t *prot = dhd->prot;
5893 int slen = 0;
5894
5895 if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "pcie_bus_tput")) {
5896 pcie_bus_tput_params_t *tput_params;
5897
5898 slen = strlen("pcie_bus_tput") + 1;
5899 tput_params = (pcie_bus_tput_params_t*)((char *)buf + slen);
5900 bcopy(&prot->host_bus_throughput_buf.pa, &tput_params->host_buf_addr,
5901 sizeof(tput_params->host_buf_addr));
5902 tput_params->host_buf_len = DHD_BUS_TPUT_BUF_LEN;
5903 }
5904
5905 }
5906
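/*
 * Illustrative sketch, not part of the driver: the WLC_SET_VAR buffer layout
 * relied on above (and in the "wme_dp" intercept below) - a NUL-terminated
 * iovar name followed immediately by its parameter block - and how
 * strlen(name) + 1 locates the parameters. The helper name is an assumption
 * for the example.
 */
#if 0
static void *
sketch_iovar_params(void *buf, const char *name)
{
	/* buf: | 'p' 'c' 'i' 'e' ... '\0' | parameter bytes ... | */
	return (char *)buf + strlen(name) + 1;
}
#endif /* end of illustrative sketch */
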
5907 /* called after an ioctl returns from dongle */
5908 static void
5909 dhd_prot_wl_ioctl_ret_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf,
5910 int ifidx, int ret, int len)
5911 {
5912
5913 if (!ret && ioc->cmd == WLC_SET_VAR && buf != NULL) {
5914 /* Intercept the wme_dp ioctl here */
5915 if (!strcmp(buf, "wme_dp")) {
5916 int slen, val = 0;
5917
5918 slen = strlen("wme_dp") + 1;
5919 if (len >= (int)(slen + sizeof(int)))
5920 bcopy(((char *)buf + slen), &val, sizeof(int));
5921 dhd->wme_dp = (uint8) ltoh32(val);
5922 }
5923
5924 }
5925
5926 }
5927
5928 #ifdef DHD_PM_CONTROL_FROM_FILE
5929 extern bool g_pm_control;
5930 #endif /* DHD_PM_CONTROL_FROM_FILE */
5931
5932 /** Use protocol to issue ioctl to dongle. Only one ioctl may be in transit. */
5933 int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
5934 {
5935 int ret = -1;
5936 uint8 action;
5937
5938 if (dhd->bus->is_linkdown) {
5939 DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n", __FUNCTION__));
5940 goto done;
5941 }
5942
5943 if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
5944 DHD_ERROR(("%s : bus is down. we have nothing to do -"
5945 " bus state: %d, sent hang: %d\n", __FUNCTION__,
5946 dhd->busstate, dhd->hang_was_sent));
5947 goto done;
5948 }
5949
5950 if (dhd->busstate == DHD_BUS_SUSPEND) {
5951 DHD_ERROR(("%s : bus is suspended\n", __FUNCTION__));
5952 goto done;
5953 }
5954
5955 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5956
5957 if (ioc->cmd == WLC_SET_PM) {
5958 #ifdef DHD_PM_CONTROL_FROM_FILE
5959 if (g_pm_control == TRUE) {
5960 DHD_ERROR(("%s: SET PM ignored!(Requested:%d)\n",
5961 __FUNCTION__, buf ? *(char *)buf : 0));
5962 goto done;
5963 }
5964 #endif /* DHD_PM_CONTROL_FROM_FILE */
5965 DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, buf ? *(char *)buf : 0));
5966 }
5967
5968 ASSERT(len <= WLC_IOCTL_MAXLEN);
5969
5970 if (len > WLC_IOCTL_MAXLEN)
5971 goto done;
5972
5973 action = ioc->set;
5974
5975 dhd_prot_wlioctl_intercept(dhd, ioc, buf);
5976
5977 if (action & WL_IOCTL_ACTION_SET) {
5978 ret = dhd_msgbuf_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
5979 } else {
5980 ret = dhd_msgbuf_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
5981 if (ret > 0)
5982 ioc->used = ret;
5983 }
5984
5985 /* Too many programs assume ioctl() returns 0 on success */
5986 if (ret >= 0) {
5987 ret = 0;
5988 } else {
5989 dhd->dongle_error = ret;
5990 }
5991
5992 dhd_prot_wl_ioctl_ret_intercept(dhd, ioc, buf, ifidx, ret, len);
5993
5994 done:
5995 return ret;
5996
5997 } /* dhd_prot_ioctl */
5998
5999 /** test / loopback */
6000
6001 int
6002 dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len)
6003 {
6004 unsigned long flags;
6005 dhd_prot_t *prot = dhd->prot;
6006 uint16 alloced = 0;
6007
6008 ioct_reqst_hdr_t *ioct_rqst;
6009
6010 uint16 hdrlen = sizeof(ioct_reqst_hdr_t);
6011 uint16 msglen = len + hdrlen;
6012 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
6013
6014 msglen = ALIGN_SIZE(msglen, DMA_ALIGN_LEN);
6015 msglen = LIMIT_TO_MAX(msglen, MSGBUF_MAX_MSG_SIZE);
6016
6017 DHD_RING_LOCK(ring->ring_lock, flags);
6018
6019 ioct_rqst = (ioct_reqst_hdr_t *)
6020 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
6021
6022 if (ioct_rqst == NULL) {
6023 DHD_RING_UNLOCK(ring->ring_lock, flags);
6024 return 0;
6025 }
6026
6027 {
6028 uint8 *ptr;
6029 uint16 i;
6030
6031 ptr = (uint8 *)ioct_rqst;
6032 for (i = 0; i < msglen; i++) {
6033 ptr[i] = i % 256;
6034 }
6035 }
6036
6037 /* Common msg buf hdr */
6038 ioct_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
6039 ring->seqnum++;
6040
6041 ioct_rqst->msg.msg_type = MSG_TYPE_LOOPBACK;
6042 ioct_rqst->msg.if_id = 0;
6043 ioct_rqst->msg.flags = ring->current_phase;
6044
6045 bcm_print_bytes("LPBK REQ: ", (uint8 *)ioct_rqst, msglen);
6046
6047 /* update ring's WR index and ring doorbell to dongle */
6048 dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
6049
6050 DHD_RING_UNLOCK(ring->ring_lock, flags);
6051
6052 return 0;
6053 }
6054
6055 /** test / loopback */
6056 void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dmaxfer)
6057 {
6058 if (dmaxfer == NULL)
6059 return;
6060
6061 dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
6062 dhd_dma_buf_free(dhd, &dmaxfer->dstmem);
6063 }
6064
6065 /** test / loopback */
6066 int
6067 dhd_prepare_schedule_dmaxfer_free(dhd_pub_t *dhdp)
6068 {
6069 dhd_prot_t *prot = dhdp->prot;
6070 dhd_dmaxfer_t *dmaxfer = &prot->dmaxfer;
6071 dmaxref_mem_map_t *dmap = NULL;
6072
6073 dmap = MALLOCZ(dhdp->osh, sizeof(dmaxref_mem_map_t));
6074 if (!dmap) {
6075 DHD_ERROR(("%s: dmap alloc failed\n", __FUNCTION__));
6076 goto mem_alloc_fail;
6077 }
6078 dmap->srcmem = &(dmaxfer->srcmem);
6079 dmap->dstmem = &(dmaxfer->dstmem);
6080
6081 DMAXFER_FREE(dhdp, dmap);
6082 return BCME_OK;
6083
6084 mem_alloc_fail:
6085 if (dmap) {
6086 MFREE(dhdp->osh, dmap, sizeof(dmaxref_mem_map_t));
6087 dmap = NULL;
6088 }
6089 return BCME_NOMEM;
6090 } /* dhd_prepare_schedule_dmaxfer_free */
6091
6092 /** test / loopback */
6093 void
6094 dmaxfer_free_prev_dmaaddr(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap)
6095 {
6096
6097 dhd_dma_buf_free(dhdp, dmmap->srcmem);
6098 dhd_dma_buf_free(dhdp, dmmap->dstmem);
6099
6100 MFREE(dhdp->osh, dmmap, sizeof(dmaxref_mem_map_t));
6101 dmmap = NULL;
6102
6103 } /* dmaxfer_free_prev_dmaaddr */
6104
6105 /** test / loopback */
6106 int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len,
6107 uint srcdelay, uint destdelay, dhd_dmaxfer_t *dmaxfer)
6108 {
6109 uint i = 0, j = 0;
6110 if (!dmaxfer)
6111 return BCME_ERROR;
6112
6113 /* First free up existing buffers */
6114 dmaxfer_free_dmaaddr(dhd, dmaxfer);
6115
6116 if (dhd_dma_buf_alloc(dhd, &dmaxfer->srcmem, len)) {
6117 return BCME_NOMEM;
6118 }
6119
6120 if (dhd_dma_buf_alloc(dhd, &dmaxfer->dstmem, len + 8)) {
6121 dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
6122 return BCME_NOMEM;
6123 }
6124
6125 dmaxfer->len = len;
6126
6127 /* Populate source with a pattern like below
6128 * 0x00000000
6129 * 0x01010101
6130 * 0x02020202
6131 * 0x03030303
6132 * 0x04040404
6133 * 0x05050505
6134 * ...
6135 * 0xFFFFFFFF
6136 */
6137 while (i < dmaxfer->len) {
6138 ((uint8*)dmaxfer->srcmem.va)[i] = j % 256;
6139 i++;
6140 if (i % 4 == 0) {
6141 j++;
6142 }
6143 }
6144
6145 OSL_CACHE_FLUSH(dmaxfer->srcmem.va, dmaxfer->len);
6146
6147 dmaxfer->srcdelay = srcdelay;
6148 dmaxfer->destdelay = destdelay;
6149
6150 return BCME_OK;
6151 } /* dmaxfer_prepare_dmaaddr */
6152
6153 static void
6154 dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg)
6155 {
6156 dhd_prot_t *prot = dhd->prot;
6157 uint64 end_usec;
6158 pcie_dmaxfer_cmplt_t *cmplt = (pcie_dmaxfer_cmplt_t *)msg;
6159
6160 BCM_REFERENCE(cmplt);
6161 end_usec = OSL_SYSUPTIME_US();
6162
6163 DHD_ERROR(("DMA loopback status: %d\n", cmplt->compl_hdr.status));
6164 prot->dmaxfer.status = cmplt->compl_hdr.status;
6165 OSL_CACHE_INV(prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
6166 if (prot->dmaxfer.srcmem.va && prot->dmaxfer.dstmem.va) {
6167 if (memcmp(prot->dmaxfer.srcmem.va,
6168 prot->dmaxfer.dstmem.va, prot->dmaxfer.len) ||
6169 cmplt->compl_hdr.status != BCME_OK) {
6170 DHD_ERROR(("DMA loopback failed\n"));
6171 prhex("XFER SRC: ",
6172 prot->dmaxfer.srcmem.va, prot->dmaxfer.len);
6173 prhex("XFER DST: ",
6174 prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
6175 prot->dmaxfer.status = BCME_ERROR;
6176 }
6177 else {
6178 switch (prot->dmaxfer.d11_lpbk) {
6179 case M2M_DMA_LPBK: {
6180 DHD_ERROR(("DMA successful pcie m2m DMA loopback\n"));
6181 } break;
6182 case D11_LPBK: {
6183 DHD_ERROR(("DMA successful with d11 loopback\n"));
6184 } break;
6185 case BMC_LPBK: {
6186 DHD_ERROR(("DMA successful with bmc loopback\n"));
6187 } break;
6188 case M2M_NON_DMA_LPBK: {
6189 DHD_ERROR(("DMA successful pcie m2m NON DMA loopback\n"));
6190 } break;
6191 case D11_HOST_MEM_LPBK: {
6192 DHD_ERROR(("DMA successful d11 host mem loopback\n"));
6193 } break;
6194 case BMC_HOST_MEM_LPBK: {
6195 DHD_ERROR(("DMA successful bmc host mem loopback\n"));
6196 } break;
6197 default: {
6198 DHD_ERROR(("Invalid loopback option\n"));
6199 } break;
6200 }
6201
6202 if (DHD_LPBKDTDUMP_ON()) {
6203 /* debug info print of the Tx and Rx buffers */
6204 dhd_prhex("XFER SRC: ", prot->dmaxfer.srcmem.va,
6205 prot->dmaxfer.len, DHD_INFO_VAL);
6206 dhd_prhex("XFER DST: ", prot->dmaxfer.dstmem.va,
6207 prot->dmaxfer.len, DHD_INFO_VAL);
6208 }
6209 }
6210 }
6211
6212 dhd_prepare_schedule_dmaxfer_free(dhd);
6213 end_usec -= prot->dmaxfer.start_usec;
6214 if (end_usec)
6215 DHD_ERROR(("DMA loopback %d bytes in %lu usec, %u kBps\n",
6216 prot->dmaxfer.len, (unsigned long)end_usec,
6217 (prot->dmaxfer.len * (1000 * 1000 / 1024) / (uint32)end_usec)));
6218 dhd->prot->dmaxfer.in_progress = FALSE;
6219
6220 dhd->bus->dmaxfer_complete = TRUE;
6221 dhd_os_dmaxfer_wake(dhd);
6222 }
6223
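/*
 * Worked example for the throughput print above (not part of the driver):
 * the factor (1000 * 1000 / 1024) evaluates to 976 in integer arithmetic,
 * so a 65536-byte transfer completing in 2000 usec is reported as
 * 65536 * 976 / 2000 = 31981 kBps, slightly below the exact
 * 65536 / 1024 / 0.002 = 32000 KiB/s because of the truncated constant.
 */
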
6224 /** Test functionality.
6225 * Transfers bytes from host to dongle and to host again using DMA
6226 * This function is not reentrant, as prot->dmaxfer.in_progress is not protected
6227 * by a spinlock.
6228 */
6229 int
6230 dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay,
6231 uint d11_lpbk, uint core_num)
6232 {
6233 unsigned long flags;
6234 int ret = BCME_OK;
6235 dhd_prot_t *prot = dhd->prot;
6236 pcie_dma_xfer_params_t *dmap;
6237 uint32 xferlen = LIMIT_TO_MAX(len, DMA_XFER_LEN_LIMIT);
6238 uint16 alloced = 0;
6239 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
6240
6241 if (prot->dmaxfer.in_progress) {
6242 DHD_ERROR(("DMA is in progress...\n"));
6243 return BCME_ERROR;
6244 }
6245
6246 if (d11_lpbk >= MAX_LPBK) {
6247 DHD_ERROR(("loopback mode should be either"
6248 " 0-PCIE_M2M_DMA, 1-D11, 2-BMC or 3-PCIE_M2M_NonDMA\n"));
6249 return BCME_ERROR;
6250 }
6251
6252 DHD_RING_LOCK(ring->ring_lock, flags);
6253
6254 prot->dmaxfer.in_progress = TRUE;
6255 if ((ret = dmaxfer_prepare_dmaaddr(dhd, xferlen, srcdelay, destdelay,
6256 &prot->dmaxfer)) != BCME_OK) {
6257 prot->dmaxfer.in_progress = FALSE;
6258 DHD_RING_UNLOCK(ring->ring_lock, flags);
6259 return ret;
6260 }
6261
6262 dmap = (pcie_dma_xfer_params_t *)
6263 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
6264
6265 if (dmap == NULL) {
6266 dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
6267 prot->dmaxfer.in_progress = FALSE;
6268 DHD_RING_UNLOCK(ring->ring_lock, flags);
6269 return BCME_NOMEM;
6270 }
6271
6272 /* Common msg buf hdr */
6273 dmap->cmn_hdr.msg_type = MSG_TYPE_LPBK_DMAXFER;
6274 dmap->cmn_hdr.request_id = htol32(DHD_FAKE_PKTID);
6275 dmap->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
6276 dmap->cmn_hdr.flags = ring->current_phase;
6277 ring->seqnum++;
6278
6279 dmap->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.srcmem.pa));
6280 dmap->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.srcmem.pa));
6281 dmap->host_ouput_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.dstmem.pa));
6282 dmap->host_ouput_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.dstmem.pa));
6283 dmap->xfer_len = htol32(prot->dmaxfer.len);
6284 dmap->srcdelay = htol32(prot->dmaxfer.srcdelay);
6285 dmap->destdelay = htol32(prot->dmaxfer.destdelay);
6286 prot->dmaxfer.d11_lpbk = d11_lpbk;
6287 dmap->flags = (((core_num & PCIE_DMA_XFER_FLG_CORE_NUMBER_MASK)
6288 << PCIE_DMA_XFER_FLG_CORE_NUMBER_SHIFT) |
6289 ((prot->dmaxfer.d11_lpbk & PCIE_DMA_XFER_FLG_D11_LPBK_MASK)
6290 << PCIE_DMA_XFER_FLG_D11_LPBK_SHIFT));
6291 prot->dmaxfer.start_usec = OSL_SYSUPTIME_US();
6292
6293 /* update ring's WR index and ring doorbell to dongle */
6294 dhd_prot_ring_write_complete(dhd, ring, dmap, 1);
6295
6296 DHD_RING_UNLOCK(ring->ring_lock, flags);
6297
6298 DHD_ERROR(("DMA loopback Started...\n"));
6299
6300 return BCME_OK;
6301 } /* dhdmsgbuf_dmaxfer_req */
6302
6303 dma_xfer_status_t
6304 dhdmsgbuf_dmaxfer_status(dhd_pub_t *dhd)
6305 {
6306 dhd_prot_t *prot = dhd->prot;
6307
6308 if (prot->dmaxfer.in_progress)
6309 return DMA_XFER_IN_PROGRESS;
6310 else if (prot->dmaxfer.status == BCME_OK)
6311 return DMA_XFER_SUCCESS;
6312 else
6313 return DMA_XFER_FAILED;
6314 }
6315
6316 /** Called in the process of submitting an ioctl to the dongle */
6317 static int
6318 dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
6319 {
6320 int ret = 0;
6321 uint copylen = 0;
6322
6323 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
6324
6325 if (dhd->bus->is_linkdown) {
6326 DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n",
6327 __FUNCTION__));
6328 return -EIO;
6329 }
6330
6331 if (dhd->busstate == DHD_BUS_DOWN) {
6332 DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
6333 return -EIO;
6334 }
6335
6336 /* don't talk to the dongle if fw is about to be reloaded */
6337 if (dhd->hang_was_sent) {
6338 DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
6339 __FUNCTION__));
6340 return -EIO;
6341 }
6342
6343 if (cmd == WLC_GET_VAR && buf)
6344 {
6345 if (!len || !*(uint8 *)buf) {
6346 DHD_ERROR(("%s(): Zero length bailing\n", __FUNCTION__));
6347 ret = BCME_BADARG;
6348 goto done;
6349 }
6350
6351 /* Respond "bcmerror" and "bcmerrorstr" with local cache */
6352 copylen = MIN(len, BCME_STRLEN);
6353
6354 if ((len >= strlen("bcmerrorstr")) &&
6355 (!strcmp((char *)buf, "bcmerrorstr"))) {
6356 strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), copylen);
6357 *(uint8 *)((uint8 *)buf + (copylen - 1)) = '\0';
6358 goto done;
6359 } else if ((len >= strlen("bcmerror")) &&
6360 !strcmp((char *)buf, "bcmerror")) {
6361			*(uint32 *)buf = dhd->dongle_error;
6362 goto done;
6363 }
6364 }
6365
6366 DHD_CTL(("query_ioctl: ACTION %d ifdix %d cmd %d len %d \n",
6367 action, ifidx, cmd, len));
6368
6369 ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
6370
6371 if (ret < 0) {
6372 DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
6373 goto done;
6374 }
6375
6376 /* wait for IOCTL completion message from dongle and get first fragment */
6377 ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
6378
6379 done:
6380 return ret;
6381 }
6382
6383 void
6384 dhd_msgbuf_iovar_timeout_dump(dhd_pub_t *dhd)
6385 {
6386 uint32 intstatus;
6387 dhd_prot_t *prot = dhd->prot;
6388 dhd->rxcnt_timeout++;
6389 dhd->rx_ctlerrs++;
6390 dhd->iovar_timeout_occured = TRUE;
6391 DHD_ERROR(("%s: resumed on timeout rxcnt_timeout%s %d ioctl_cmd %d "
6392 "trans_id %d state %d busstate=%d ioctl_received=%d\n", __FUNCTION__,
6393 dhd->is_sched_error ? " due to scheduling problem" : "",
6394 dhd->rxcnt_timeout, prot->curr_ioctl_cmd, prot->ioctl_trans_id,
6395 prot->ioctl_state, dhd->busstate, prot->ioctl_received));
6396 #if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
6397 if (dhd->is_sched_error && dhd->memdump_enabled) {
6398 /* change g_assert_type to trigger Kernel panic */
6399 g_assert_type = 2;
6400 /* use ASSERT() to trigger panic */
6401 ASSERT(0);
6402 }
6403 #endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
6404
6405 if (prot->curr_ioctl_cmd == WLC_SET_VAR ||
6406 prot->curr_ioctl_cmd == WLC_GET_VAR) {
6407 char iovbuf[32];
6408 int i;
6409 int dump_size = 128;
6410 uint8 *ioctl_buf = (uint8 *)prot->ioctbuf.va;
6411 memset(iovbuf, 0, sizeof(iovbuf));
6412 strncpy(iovbuf, ioctl_buf, sizeof(iovbuf) - 1);
6413 iovbuf[sizeof(iovbuf) - 1] = '\0';
6414 DHD_ERROR(("Current IOVAR (%s): %s\n",
6415 prot->curr_ioctl_cmd == WLC_SET_VAR ?
6416 "WLC_SET_VAR" : "WLC_GET_VAR", iovbuf));
6417 DHD_ERROR(("========== START IOCTL REQBUF DUMP ==========\n"));
6418 for (i = 0; i < dump_size; i++) {
6419 DHD_ERROR(("%02X ", ioctl_buf[i]));
6420 if ((i % 32) == 31) {
6421 DHD_ERROR(("\n"));
6422 }
6423 }
6424 DHD_ERROR(("\n========== END IOCTL REQBUF DUMP ==========\n"));
6425 }
6426
6427 /* Check the PCIe link status by reading intstatus register */
6428 intstatus = si_corereg(dhd->bus->sih,
6429 dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
6430 if (intstatus == (uint32)-1) {
6431 DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
6432 dhd->bus->is_linkdown = TRUE;
6433 }
6434
6435 dhd_bus_dump_console_buffer(dhd->bus);
6436 dhd_prot_debug_info_print(dhd);
6437 }
6438
6439 /**
6440 * Waits for IOCTL completion message from the dongle, copies this into caller
6441 * provided parameter 'buf'.
6442 */
6443 static int
6444 dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf)
6445 {
6446 dhd_prot_t *prot = dhd->prot;
6447 int timeleft;
6448 unsigned long flags;
6449 int ret = 0;
6450
6451 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
6452
6453 if (dhd_query_bus_erros(dhd)) {
6454 ret = -EIO;
6455 goto out;
6456 }
6457
6458 timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
6459
6460 #ifdef DHD_RECOVER_TIMEOUT
6461 if (prot->ioctl_received == 0) {
6462 uint32 intstatus = si_corereg(dhd->bus->sih,
6463 dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
6464		int host_irq_disabled = dhdpcie_irq_disabled(dhd->bus);
6465 if ((intstatus) && (intstatus != (uint32)-1) &&
6466 (timeleft == 0) && (!dhd_query_bus_erros(dhd))) {
6467 DHD_ERROR(("%s: iovar timeout trying again intstatus=%x"
6468 " host_irq_disabled=%d\n",
6469				__FUNCTION__, intstatus, host_irq_disabled));
6470 dhd_pcie_intr_count_dump(dhd);
6471 dhd_print_tasklet_status(dhd);
6472 dhd_prot_process_ctrlbuf(dhd);
6473 timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
6474 /* Clear Interrupts */
6475 dhdpcie_bus_clear_intstatus(dhd->bus);
6476 }
6477 }
6478 #endif /* DHD_RECOVER_TIMEOUT */
6479
6480 if (timeleft == 0 && (!dhd_query_bus_erros(dhd))) {
6481 /* check if resumed on time out related to scheduling issue */
6482 dhd->is_sched_error = FALSE;
6483 if (dhd->bus->isr_entry_time > prot->ioctl_fillup_time) {
6484 dhd->is_sched_error = dhd_bus_query_dpc_sched_errors(dhd);
6485 }
6486
6487 dhd_msgbuf_iovar_timeout_dump(dhd);
6488
6489 #ifdef DHD_FW_COREDUMP
6490 /* Collect socram dump */
6491 if (dhd->memdump_enabled) {
6492 /* collect core dump */
6493 dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT;
6494 dhd_bus_mem_dump(dhd);
6495 }
6496 #endif /* DHD_FW_COREDUMP */
6497 #ifdef SUPPORT_LINKDOWN_RECOVERY
6498 #ifdef CONFIG_ARCH_MSM
6499 dhd->bus->no_cfg_restore = 1;
6500 #endif /* CONFIG_ARCH_MSM */
6501 #endif /* SUPPORT_LINKDOWN_RECOVERY */
6502 ret = -ETIMEDOUT;
6503 goto out;
6504 } else {
6505 if (prot->ioctl_received != IOCTL_RETURN_ON_SUCCESS) {
6506 DHD_ERROR(("%s: IOCTL failure due to ioctl_received = %d\n",
6507 __FUNCTION__, prot->ioctl_received));
6508 ret = -EINVAL;
6509 goto out;
6510 }
6511 dhd->rxcnt_timeout = 0;
6512 dhd->rx_ctlpkts++;
6513 DHD_CTL(("%s: ioctl resp resumed, got %d\n",
6514 __FUNCTION__, prot->ioctl_resplen));
6515 }
6516
6517 if (dhd->prot->ioctl_resplen > len)
6518 dhd->prot->ioctl_resplen = (uint16)len;
6519 if (buf)
6520 bcopy(dhd->prot->retbuf.va, buf, dhd->prot->ioctl_resplen);
6521
6522 ret = (int)(dhd->prot->ioctl_status);
6523
6524 out:
6525 DHD_GENERAL_LOCK(dhd, flags);
6526 dhd->prot->ioctl_state = 0;
6527 dhd->prot->ioctl_resplen = 0;
6528 dhd->prot->ioctl_received = IOCTL_WAIT;
6529 dhd->prot->curr_ioctl_cmd = 0;
6530 DHD_GENERAL_UNLOCK(dhd, flags);
6531
6532 return ret;
6533 } /* dhd_msgbuf_wait_ioctl_cmplt */
6534
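/*
 * Illustrative sketch, not part of the driver: the wait/wake handshake that
 * dhd_os_ioctl_resp_wait() / dhd_wakeup_ioctl_event() presumably implement
 * around the prot->ioctl_received flag, expressed with a plain Linux wait
 * queue. Names and the timeout value are assumptions; the real OS layer
 * lives in dhd_linux.c.
 */
#if 0
static DECLARE_WAIT_QUEUE_HEAD(sketch_ioctl_waitq);
static int sketch_ioctl_received;	/* 0 corresponds to IOCTL_WAIT */

static int sketch_ioctl_resp_wait(void)
{
	/* returns 0 on timeout, >0 if woken with the condition true */
	return wait_event_timeout(sketch_ioctl_waitq,
		sketch_ioctl_received != 0, msecs_to_jiffies(2000));
}

static void sketch_ioctl_resp_wake(int status)
{
	sketch_ioctl_received = status;	/* e.g. IOCTL_RETURN_ON_SUCCESS */
	wake_up(&sketch_ioctl_waitq);
}
#endif /* end of illustrative sketch */
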
6535 static int
6536 dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
6537 {
6538 int ret = 0;
6539
6540 DHD_TRACE(("%s: Enter \n", __FUNCTION__));
6541
6542 if (dhd->bus->is_linkdown) {
6543 DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n",
6544 __FUNCTION__));
6545 return -EIO;
6546 }
6547
6548 if (dhd->busstate == DHD_BUS_DOWN) {
6549 DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
6550 return -EIO;
6551 }
6552
6553 /* don't talk to the dongle if fw is about to be reloaded */
6554 if (dhd->hang_was_sent) {
6555 DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
6556 __FUNCTION__));
6557 return -EIO;
6558 }
6559
6560 DHD_CTL(("ACTION %d ifdix %d cmd %d len %d \n",
6561 action, ifidx, cmd, len));
6562
6563 /* Fill up msgbuf for ioctl req */
6564 ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
6565
6566 if (ret < 0) {
6567 DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
6568 goto done;
6569 }
6570
6571 ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
6572
6573 done:
6574 return ret;
6575 }
6576
6577 /** Called by upper DHD layer. Handles a protocol control response asynchronously. */
6578 int dhd_prot_ctl_complete(dhd_pub_t *dhd)
6579 {
6580 return 0;
6581 }
6582
6583 /** Called by upper DHD layer. Check for and handle local prot-specific iovar commands */
6584 int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name,
6585 void *params, int plen, void *arg, int len, bool set)
6586 {
6587 return BCME_UNSUPPORTED;
6588 }
6589
6590 #ifdef DHD_DUMP_PCIE_RINGS
6591 int dhd_d2h_h2d_ring_dump(dhd_pub_t *dhd, void *file, unsigned long *file_posn)
6592 {
6593 dhd_prot_t *prot = dhd->prot;
6594 msgbuf_ring_t *ring;
6595 int ret = 0;
6596
6597 ring = &prot->h2dring_ctrl_subn;
6598 if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0)
6599 goto exit;
6600
6601 ring = &prot->d2hring_ctrl_cpln;
6602 if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0)
6603 goto exit;
6604
6605 ring = prot->h2dring_info_subn;
6606 if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0)
6607 goto exit;
6608
6609 ring = prot->d2hring_info_cpln;
6610 if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0)
6611 goto exit;
6612
6613 ring = &prot->d2hring_tx_cpln;
6614 if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0)
6615 goto exit;
6616
6617 ring = &prot->d2hring_rx_cpln;
6618 if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0)
6619 goto exit;
6620
6621 ring = prot->h2d_flowrings_pool;
6622 if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0)
6623 goto exit;
6624
6625 exit:
6626 return ret;
6627 }
6628
6629 /* Writes to file in TLV format */
6630 static
6631 int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, unsigned long *file_posn)
6632 {
6633 unsigned long flags;
6634 int len = 0;
6635 int ret = 0;
6636 uint16 loc_rd = 0;
6637
6638 while (len < ((ring->max_items) * (ring->item_len))) {
6639 uint8 *msg_addr;
6640 cmn_msg_hdr_t *msg;
6641
6642 DHD_RING_LOCK(ring->ring_lock, flags);
6643 msg_addr = (uint8*)ring->dma_buf.va + (loc_rd * ring->item_len);
6644 ASSERT(loc_rd < ring->max_items);
6645 DHD_RING_UNLOCK(ring->ring_lock, flags);
6646
6647 if (msg_addr == NULL) {
6648 return BCME_ERROR;
6649 }
6650 msg = (cmn_msg_hdr_t *)msg_addr;
6651
6652 ret = dhd_os_write_file_posn(file, file_posn, (char *)(&(msg->msg_type)),
6653 sizeof(msg->msg_type));
6654 if (ret < 0) {
6655 DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
6656 return BCME_ERROR;
6657 }
6658 ret = dhd_os_write_file_posn(file, file_posn, (char *)(&(ring->item_len)),
6659 sizeof(ring->item_len));
6660 if (ret < 0) {
6661 DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
6662 return BCME_ERROR;
6663 }
6664 ret = dhd_os_write_file_posn(file, file_posn, (char *)msg_addr, ring->item_len);
6665 if (ret < 0) {
6666 DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
6667 return BCME_ERROR;
6668 }
6669
6670 len += ring->item_len;
6671 loc_rd += 1;
6672 }
6673 return BCME_OK;
6674 }
6675 #endif /* DHD_DUMP_PCIE_RINGS */
6676
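/*
 * Illustrative sketch, not part of the driver: the per-item record layout
 * emitted by dhd_ring_write() above - the item's msg_type, then the ring's
 * item_len, then item_len bytes of the raw work item - repeated max_items
 * times per ring. Field widths below mirror sizeof(msg->msg_type) and
 * sizeof(ring->item_len) and are assumptions made for the example.
 */
#if 0
struct sketch_ring_dump_record {
	uint8  msg_type;	/* cmn_msg_hdr_t::msg_type of the dumped item */
	uint16 item_len;	/* size of the raw work item that follows */
	/* followed by item_len bytes of ring payload */
};
#endif /* end of illustrative sketch */
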
6677 /** Add prot dump output to a buffer */
6678 void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
6679 {
6680
6681 if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM)
6682 bcm_bprintf(b, "\nd2h_sync: SEQNUM:");
6683 else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM)
6684 bcm_bprintf(b, "\nd2h_sync: XORCSUM:");
6685 else
6686 bcm_bprintf(b, "\nd2h_sync: NONE:");
6687 bcm_bprintf(b, " d2h_sync_wait max<%lu> tot<%lu>\n",
6688 dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot);
6689
6690 bcm_bprintf(b, "\nDongle DMA Indices: h2d %d d2h %d index size %d bytes\n",
6691 dhd->dma_h2d_ring_upd_support,
6692 dhd->dma_d2h_ring_upd_support,
6693 dhd->prot->rw_index_sz);
6694 bcm_bprintf(b, "h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
6695 h2d_max_txpost, dhd->prot->h2d_max_txpost);
6696 }
6697
6698 /* Update local copy of dongle statistics */
6699 void dhd_prot_dstats(dhd_pub_t *dhd)
6700 {
6701 return;
6702 }
6703
6704 /** Called by upper DHD layer */
6705 int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf,
6706 uint reorder_info_len, void **pkt, uint32 *free_buf_count)
6707 {
6708 return 0;
6709 }
6710
6711 /** Debug related, post a dummy message to interrupt dongle. Used to process cons commands. */
6712 int
6713 dhd_post_dummy_msg(dhd_pub_t *dhd)
6714 {
6715 unsigned long flags;
6716 hostevent_hdr_t *hevent = NULL;
6717 uint16 alloced = 0;
6718
6719 dhd_prot_t *prot = dhd->prot;
6720 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
6721
6722 DHD_RING_LOCK(ring->ring_lock, flags);
6723
6724 hevent = (hostevent_hdr_t *)
6725 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
6726
6727 if (hevent == NULL) {
6728 DHD_RING_UNLOCK(ring->ring_lock, flags);
6729 return -1;
6730 }
6731
6732 /* CMN msg header */
6733 hevent->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
6734 ring->seqnum++;
6735 hevent->msg.msg_type = MSG_TYPE_HOST_EVNT;
6736 hevent->msg.if_id = 0;
6737 hevent->msg.flags = ring->current_phase;
6738
6739 /* Event payload */
6740 hevent->evnt_pyld = htol32(HOST_EVENT_CONS_CMD);
6741
6742 /* Since we are filling the data directly into the bufptr obtained
6743 * from the msgbuf, we can call write_complete directly.
6744 */
6745 dhd_prot_ring_write_complete(dhd, ring, hevent, 1);
6746
6747 DHD_RING_UNLOCK(ring->ring_lock, flags);
6748
6749 return 0;
6750 }
6751
6752 /**
6753 * If exactly_nitems is true, this function will allocate space for nitems or fail
6754 * If exactly_nitems is false, this function will allocate space for nitems or less
6755 */
6756 static void * BCMFASTPATH
6757 dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
6758 uint16 nitems, uint16 * alloced, bool exactly_nitems)
6759 {
6760 void * ret_buf;
6761
6762 /* Alloc space for nitems in the ring */
6763 ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
6764
6765 if (ret_buf == NULL) {
6766 /* if alloc failed, invalidate cached read ptr */
6767 if (dhd->dma_d2h_ring_upd_support) {
6768 ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
6769 } else {
6770 dhd_bus_cmn_readshared(dhd->bus, &(ring->rd), RING_RD_UPD, ring->idx);
6771 #ifdef SUPPORT_LINKDOWN_RECOVERY
6772 /* Check if ring->rd is valid */
6773 if (ring->rd >= ring->max_items) {
6774 DHD_ERROR(("%s: Invalid rd idx=%d\n", ring->name, ring->rd));
6775 dhd->bus->read_shm_fail = TRUE;
6776 return NULL;
6777 }
6778 #endif /* SUPPORT_LINKDOWN_RECOVERY */
6779 }
6780
6781 /* Try allocating once more */
6782 ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
6783
6784 if (ret_buf == NULL) {
6785 DHD_INFO(("%s: Ring space not available \n", ring->name));
6786 return NULL;
6787 }
6788 }
6789
6790 if (ret_buf == HOST_RING_BASE(ring)) {
6791 DHD_INFO(("%s: setting the phase now\n", ring->name));
6792 ring->current_phase = ring->current_phase ? 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
6793 }
6794
6795 /* Return alloced space */
6796 return ret_buf;
6797 }
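/*
 * Caller sketch (informational only) for the helper above. With
 * exactly_nitems == FALSE the ring may hand back fewer slots than requested,
 * so a batching caller must honor 'alloced' rather than the count it asked
 * for; with exactly_nitems == TRUE it is all-or-nothing. The loop below is a
 * hypothetical producer ('wanted' and fill_one_item() are made up) and
 * assumes the ring lock is held, as the callers in this file do:
 *
 *	uint16 i, alloced = 0;
 *	uint8 *msg_start = (uint8 *)dhd_prot_alloc_ring_space(dhd, ring,
 *		wanted, &alloced, FALSE);
 *	if (msg_start == NULL)
 *		return BCME_NOMEM;
 *	for (i = 0; i < alloced; i++)
 *		fill_one_item(msg_start + (i * ring->item_len));
 *	dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
 */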
6798
6799 /**
6800 * Non-inline ioctl request.
6801 * Forms an ioctl request first, laid out per ioctptr_reqst_hdr_t, in the circular buffer.
6802 * A separate request buffer is formed, with a 4 byte cmn header added at the front;
6803 * the buf contents from the parent function are copied into the remaining section of this buffer.
6804 */
6805 static int
6806 dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx)
6807 {
6808 dhd_prot_t *prot = dhd->prot;
6809 ioctl_req_msg_t *ioct_rqst;
6810 void * ioct_buf; /* For ioctl payload */
6811 uint16 rqstlen, resplen;
6812 unsigned long flags;
6813 uint16 alloced = 0;
6814 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
6815
6816 if (dhd_query_bus_erros(dhd)) {
6817 return -EIO;
6818 }
6819
6820 rqstlen = len;
6821 resplen = len;
6822
6823 /* Limit the ioctl request to MSGBUF_MAX_MSG_SIZE bytes including hdrs:
6824 * an 8K allocation of the dongle buffer fails, and dhd doesn't give separate
6825 * input & output buf lens, so assume the input length can never be more than 2K.
6826 */
6827 rqstlen = MIN(rqstlen, MSGBUF_IOCTL_MAX_RQSTLEN);
6828
6829 DHD_RING_LOCK(ring->ring_lock, flags);
6830
6831 if (prot->ioctl_state) {
6832 DHD_ERROR(("%s: pending ioctl %02x\n", __FUNCTION__, prot->ioctl_state));
6833 DHD_RING_UNLOCK(ring->ring_lock, flags);
6834 return BCME_BUSY;
6835 } else {
6836 prot->ioctl_state = MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING;
6837 }
6838
6839 /* Request for cbuf space */
6840 ioct_rqst = (ioctl_req_msg_t*)
6841 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
6842 if (ioct_rqst == NULL) {
6843 DHD_ERROR(("couldn't allocate space on msgring to send ioctl request\n"));
6844 prot->ioctl_state = 0;
6845 prot->curr_ioctl_cmd = 0;
6846 prot->ioctl_received = IOCTL_WAIT;
6847 DHD_RING_UNLOCK(ring->ring_lock, flags);
6848 return -1;
6849 }
6850
6851 /* Common msg buf hdr */
6852 ioct_rqst->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ;
6853 ioct_rqst->cmn_hdr.if_id = (uint8)ifidx;
6854 ioct_rqst->cmn_hdr.flags = ring->current_phase;
6855 ioct_rqst->cmn_hdr.request_id = htol32(DHD_IOCTL_REQ_PKTID);
6856 ioct_rqst->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
6857 ring->seqnum++;
6858
6859 ioct_rqst->cmd = htol32(cmd);
6860 prot->curr_ioctl_cmd = cmd;
6861 ioct_rqst->output_buf_len = htol16(resplen);
6862 prot->ioctl_trans_id++;
6863 ioct_rqst->trans_id = prot->ioctl_trans_id;
6864
6865 /* populate ioctl buffer info */
6866 ioct_rqst->input_buf_len = htol16(rqstlen);
6867 ioct_rqst->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->ioctbuf.pa));
6868 ioct_rqst->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->ioctbuf.pa));
6869 /* copy ioct payload */
6870 ioct_buf = (void *) prot->ioctbuf.va;
6871
6872 prot->ioctl_fillup_time = OSL_LOCALTIME_NS();
6873
6874 if (buf)
6875 memcpy(ioct_buf, buf, len);
6876
6877 OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, len);
6878
6879 if (!ISALIGNED(ioct_buf, DMA_ALIGN_LEN))
6880 DHD_ERROR(("host ioctl address unaligned!\n"));
6881
6882 DHD_CTL(("submitted IOCTL request: request_id %d, cmd %d, output_buf_len %d, tx_id %d\n",
6883 ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len,
6884 ioct_rqst->trans_id));
6885
6886 /* update ring's WR index and ring doorbell to dongle */
6887 dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
6888
6889 DHD_RING_UNLOCK(ring->ring_lock, flags);
6890
6891 return 0;
6892 } /* dhd_fillup_ioct_reqst */
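/*
 * Layout sketch (informational only) of what dhd_fillup_ioct_reqst() above
 * builds: the work item on the H2D control submission ring carries only the
 * request metadata, while the ioctl payload itself is copied into the
 * separate DMA-able prot->ioctbuf whose physical address the dongle is given
 * in host_input_buf_addr.
 *
 *	ctrl submit ring slot (ioctl_req_msg_t)      prot->ioctbuf (DMA-able)
 *	+-----------------------------------+        +----------------------+
 *	| cmn_hdr: type/if_id/req_id/epoch  |        | up to rqstlen bytes  |
 *	| cmd, trans_id                     |        | of ioctl payload     |
 *	| input_buf_len, output_buf_len     |        | copied from 'buf'    |
 *	| host_input_buf_addr --------------+------->+----------------------+
 *	+-----------------------------------+
 */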
6893
6894 /**
6895 * dhd_prot_ring_attach - Initialize the msgbuf_ring object and attach a
6896 * DMA-able buffer to it. The ring is NOT tagged as inited until all the ring
6897 * information is posted to the dongle.
6898 *
6899 * Invoked in dhd_prot_attach for the common rings, and in dhd_prot_init for
6900 * each flowring in pool of flowrings.
6901 *
6902 * returns BCME_OK=0 on success
6903 * returns non-zero negative error value on failure.
6904 */
6905 static int
6906 dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring, const char *name,
6907 uint16 max_items, uint16 item_len, uint16 ringid)
6908 {
6909 int dma_buf_alloced = BCME_NOMEM;
6910 uint32 dma_buf_len = max_items * item_len;
6911 dhd_prot_t *prot = dhd->prot;
6912 uint16 max_flowrings = dhd->bus->max_tx_flowrings;
6913
6914 ASSERT(ring);
6915 ASSERT(name);
6916 ASSERT((max_items < 0xFFFF) && (item_len < 0xFFFF) && (ringid < 0xFFFF));
6917
6918 /* Init name */
6919 strncpy(ring->name, name, RING_NAME_MAX_LENGTH);
6920 ring->name[RING_NAME_MAX_LENGTH - 1] = '\0';
6921
6922 ring->idx = ringid;
6923
6924 ring->max_items = max_items;
6925 ring->item_len = item_len;
6926
6927 /* A contiguous space may be reserved for all flowrings */
6928 if (DHD_IS_FLOWRING(ringid, max_flowrings) && (prot->flowrings_dma_buf.va)) {
6929 /* Carve out from the contiguous DMA-able flowring buffer */
6930 uint16 flowid;
6931 uint32 base_offset;
6932
6933 dhd_dma_buf_t *dma_buf = &ring->dma_buf;
6934 dhd_dma_buf_t *rsv_buf = &prot->flowrings_dma_buf;
6935
6936 flowid = DHD_RINGID_TO_FLOWID(ringid);
6937 base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len;
6938
6939 ASSERT(base_offset + dma_buf_len <= rsv_buf->len);
6940
6941 dma_buf->len = dma_buf_len;
6942 dma_buf->va = (void *)((uintptr)rsv_buf->va + base_offset);
6943 PHYSADDRHISET(dma_buf->pa, PHYSADDRHI(rsv_buf->pa));
6944 PHYSADDRLOSET(dma_buf->pa, PHYSADDRLO(rsv_buf->pa) + base_offset);
6945
6946 /* On 64bit, contiguous space may not span across 0x00000000FFFFFFFF */
6947 ASSERT(PHYSADDRLO(dma_buf->pa) >= PHYSADDRLO(rsv_buf->pa));
6948
6949 dma_buf->dmah = rsv_buf->dmah;
6950 dma_buf->secdma = rsv_buf->secdma;
6951
6952 (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
6953 } else {
6954 /* Allocate a dhd_dma_buf */
6955 dma_buf_alloced = dhd_dma_buf_alloc(dhd, &ring->dma_buf, dma_buf_len);
6956 if (dma_buf_alloced != BCME_OK) {
6957 return BCME_NOMEM;
6958 }
6959 }
6960
6961 /* CAUTION: Save ring::base_addr in little endian format! */
6962 dhd_base_addr_htolpa(&ring->base_addr, ring->dma_buf.pa);
6963
6964 #ifdef BCM_SECURE_DMA
6965 if (SECURE_DMA_ENAB(prot->osh)) {
6966 ring->dma_buf.secdma = MALLOCZ(prot->osh, sizeof(sec_cma_info_t));
6967 if (ring->dma_buf.secdma == NULL) {
6968 goto free_dma_buf;
6969 }
6970 }
6971 #endif /* BCM_SECURE_DMA */
6972
6973 ring->ring_lock = dhd_os_spin_lock_init(dhd->osh);
6974
6975 DHD_INFO(("RING_ATTACH : %s Max item %d len item %d total size %d "
6976 "ring start %p buf phys addr %x:%x \n",
6977 ring->name, ring->max_items, ring->item_len,
6978 dma_buf_len, ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
6979 ltoh32(ring->base_addr.low_addr)));
6980
6981 return BCME_OK;
6982
6983 #ifdef BCM_SECURE_DMA
6984 free_dma_buf:
6985 if (dma_buf_alloced == BCME_OK) {
6986 dhd_dma_buf_free(dhd, &ring->dma_buf);
6987 }
6988 #endif /* BCM_SECURE_DMA */
6989
6990 return BCME_NOMEM;
6991
6992 } /* dhd_prot_ring_attach */
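/*
 * Worked example (informational only) of the carve-out arithmetic above for
 * a flowring whose DMA buffer comes from the contiguous flowrings_dma_buf.
 * The numbers are illustrative and assume H2DRING_TXPOST_ITEMSIZE is 48 and
 * BCMPCIE_H2D_COMMON_MSGRINGS is 2:
 *
 *	dma_buf_len = max_items * item_len         e.g. 512 * 48 = 24576
 *	flowid      = DHD_RINGID_TO_FLOWID(ringid)
 *	base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len
 *	            = (4 - 2) * 24576 = 49152      for the flowring with flowid 4
 *
 * so that flowring's va/pa are rsv_buf's va/pa plus 49152 bytes, and the
 * ASSERT above guarantees the carve-out stays inside rsv_buf->len.
 */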
6993
6994 /**
6995 * dhd_prot_ring_init - Post the common ring information to dongle.
6996 *
6997 * Used only for common rings.
6998 *
6999 * The flowrings information is passed via the create flowring control message
7000 * (tx_flowring_create_request_t) sent over the H2D control submission common
7001 * ring.
7002 */
7003 static void
7004 dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring)
7005 {
7006 ring->wr = 0;
7007 ring->rd = 0;
7008 ring->curr_rd = 0;
7009
7010 /* CAUTION: ring::base_addr already in Little Endian */
7011 dhd_bus_cmn_writeshared(dhd->bus, &ring->base_addr,
7012 sizeof(sh_addr_t), RING_BUF_ADDR, ring->idx);
7013 dhd_bus_cmn_writeshared(dhd->bus, &ring->max_items,
7014 sizeof(uint16), RING_MAX_ITEMS, ring->idx);
7015 dhd_bus_cmn_writeshared(dhd->bus, &ring->item_len,
7016 sizeof(uint16), RING_ITEM_LEN, ring->idx);
7017
7018 dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
7019 sizeof(uint16), RING_WR_UPD, ring->idx);
7020 dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
7021 sizeof(uint16), RING_RD_UPD, ring->idx);
7022
7023 /* ring inited */
7024 ring->inited = TRUE;
7025
7026 } /* dhd_prot_ring_init */
7027
7028 /**
7029 * dhd_prot_ring_reset - bzero a ring's DMA-ble buffer and cache flush
7030 * Reset WR and RD indices to 0.
7031 */
7032 static void
7033 dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring)
7034 {
7035 DHD_TRACE(("%s\n", __FUNCTION__));
7036
7037 dhd_dma_buf_reset(dhd, &ring->dma_buf);
7038
7039 ring->rd = ring->wr = 0;
7040 ring->curr_rd = 0;
7041 ring->inited = FALSE;
7042 ring->create_pending = FALSE;
7043 }
7044
7045 /**
7046 * dhd_prot_ring_detach - Detach the DMA-able buffer and any other objects
7047 * hanging off the msgbuf_ring.
7048 */
7049 static void
7050 dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring)
7051 {
7052 dhd_prot_t *prot = dhd->prot;
7053 uint16 max_flowrings = dhd->bus->max_tx_flowrings;
7054 ASSERT(ring);
7055
7056 ring->inited = FALSE;
7057 /* rd = ~0, wr = ring->rd - 1, max_items = 0, len_item = ~0 */
7058
7059 #ifdef BCM_SECURE_DMA
7060 if (SECURE_DMA_ENAB(prot->osh)) {
7061 if (ring->dma_buf.secdma) {
7062 SECURE_DMA_UNMAP_ALL(prot->osh, ring->dma_buf.secdma);
7063 MFREE(prot->osh, ring->dma_buf.secdma, sizeof(sec_cma_info_t));
7064 ring->dma_buf.secdma = NULL;
7065 }
7066 }
7067 #endif /* BCM_SECURE_DMA */
7068
7069 /* If the DMA-able buffer was carved out of a pre-reserved contiguous
7070 * memory, then simply stop using it.
7071 */
7072 if (DHD_IS_FLOWRING(ring->idx, max_flowrings) && (prot->flowrings_dma_buf.va)) {
7073 (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
7074 memset(&ring->dma_buf, 0, sizeof(dhd_dma_buf_t));
7075 } else {
7076 dhd_dma_buf_free(dhd, &ring->dma_buf);
7077 }
7078
7079 dhd_os_spin_lock_deinit(dhd->osh, ring->ring_lock);
7080
7081 } /* dhd_prot_ring_detach */
7082
7083 /*
7084 * +----------------------------------------------------------------------------
7085 * Flowring Pool
7086 *
7087 * Unlike common rings, which are attached very early on (dhd_prot_attach),
7088 * flowrings are dynamically instantiated. Moreover, flowrings may require a
7089 * larger DMA-able buffer. To avoid issues with fragmented cache coherent
7090 * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once.
7091 * The DMA-able buffers are attached to these pre-allocated msgbuf_ring.
7092 *
7093 * Each DMA-able buffer may be allocated independently, or may be carved out
7094 * of a single large contiguous region that is registered with the protocol
7095 * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region
7096 * may not span 0x00000000FFFFFFFF (avoid dongle side 64bit ptr arithmetic).
7097 *
7098 * No flowring pool action is performed in dhd_prot_attach(), as the number
7099 * of h2d rings is not yet known.
7100 *
7101 * In dhd_prot_init(), the dongle advertized number of h2d rings is used to
7102 * determine the number of flowrings required, and a pool of msgbuf_rings are
7103 * allocated and a DMA-able buffer (carved or allocated) is attached.
7104 * See: dhd_prot_flowrings_pool_attach()
7105 *
7106 * A flowring msgbuf_ring object may be fetched from this pool during flowring
7107 * creation, using the flowid. Likewise, flowrings may be freed back into the
7108 * pool on flowring deletion.
7109 * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release()
7110 *
7111 * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers
7112 * are detached (returned back to the carved region or freed), and the pool of
7113 * msgbuf_ring and any objects allocated against it are freed.
7114 * See: dhd_prot_flowrings_pool_detach()
7115 *
7116 * In dhd_prot_reset(), the flowring pool is simply reset by returning it to a
7117 * state as-if upon an attach. All DMA-able buffers are retained.
7118 * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring
7119 * pool attach will notice that the pool persists and continue to use it. This
7120 * will avoid the case of a fragmented DMA-able region.
7121 *
7122 * +----------------------------------------------------------------------------
7123 */
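/*
 * Lifecycle sketch (informational only), tying the description above to the
 * functions below; error handling is omitted and 'flowid' is hypothetical:
 *
 *	dhd_prot_flowrings_pool_attach(dhd);                  in dhd_prot_init
 *	ring = dhd_prot_flowrings_pool_fetch(dhd, flowid);    flowring create
 *	... ring in use by the flowring ...
 *	dhd_prot_flowrings_pool_release(dhd, flowid, ring);   flowring delete
 *	dhd_prot_flowrings_pool_reset(dhd);                   in dhd_prot_reset
 *	dhd_prot_flowrings_pool_detach(dhd);                  in dhd_prot_detach
 */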
7124
7125 /* Conversion of a flowid to a flowring pool index */
7126 #define DHD_FLOWRINGS_POOL_OFFSET(flowid) \
7127 ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)
7128
7129 /* Fetch the msgbuf_ring_t from the flowring pool given a flowid */
7130 #define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \
7131 (msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + \
7132 DHD_FLOWRINGS_POOL_OFFSET(flowid)
7133
7134 /* Traverse each flowring in the flowring pool, assigning ring and flowid */
7135 #define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) \
7136 for ((flowid) = DHD_FLOWRING_START_FLOWID, \
7137 (ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \
7138 (flowid) < ((total_flowrings) + DHD_FLOWRING_START_FLOWID); \
7139 (ring)++, (flowid)++)
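/*
 * Worked example (informational only), assuming DHD_FLOWRING_START_FLOWID
 * equals BCMPCIE_H2D_COMMON_MSGRINGS (2): flowids 2, 3, 4, ... map to pool
 * slots 0, 1, 2, ..., so DHD_RING_IN_FLOWRINGS_POOL(prot, 3) resolves to
 * &prot->h2d_flowrings_pool[1].
 */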
7140
7141 /* Fetch number of H2D flowrings given the total number of h2d rings */
7142 static uint16
7143 dhd_get_max_flow_rings(dhd_pub_t *dhd)
7144 {
7145 if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
7146 return dhd->bus->max_tx_flowrings;
7147 else
7148 return (dhd->bus->max_tx_flowrings - BCMPCIE_H2D_COMMON_MSGRINGS);
7149 }
7150
7151 /**
7152 * dhd_prot_flowrings_pool_attach - Initialize a pool of flowring msgbuf_ring_t.
7153 *
7154 * Allocate a pool of msgbuf_ring along with DMA-able buffers for flowrings.
7155 * Dongle includes common rings when it advertizes the number of H2D rings.
7156 * Allocates a pool of msgbuf_ring_t and invokes dhd_prot_ring_attach to
7157 * allocate the DMA-able buffer and initialize each msgbuf_ring_t object.
7158 *
7159 * dhd_prot_ring_attach is invoked to perform the actual initialization and
7160 * attaching the DMA-able buffer.
7161 *
7162 * Later dhd_prot_flowrings_pool_fetch() may be used to fetch a preallocated and
7163 * initialized msgbuf_ring_t object.
7164 *
7165 * returns BCME_OK=0 on success
7166 * returns non-zero negative error value on failure.
7167 */
7168 static int
7169 dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd)
7170 {
7171 uint16 flowid;
7172 msgbuf_ring_t *ring;
7173 uint16 h2d_flowrings_total; /* exclude H2D common rings */
7174 dhd_prot_t *prot = dhd->prot;
7175 char ring_name[RING_NAME_MAX_LENGTH];
7176
7177 if (prot->h2d_flowrings_pool != NULL)
7178 return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
7179
7180 ASSERT(prot->h2d_rings_total == 0);
7181
7182 /* h2d_rings_total includes H2D common rings: ctrl and rxbuf subn */
7183 prot->h2d_rings_total = (uint16)dhd_bus_max_h2d_queues(dhd->bus);
7184
7185 if (prot->h2d_rings_total < BCMPCIE_H2D_COMMON_MSGRINGS) {
7186 DHD_ERROR(("%s: h2d_rings_total advertized as %u\n",
7187 __FUNCTION__, prot->h2d_rings_total));
7188 return BCME_ERROR;
7189 }
7190
7191 /* Subtract number of H2D common rings, to determine number of flowrings */
7192 h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
7193
7194 DHD_ERROR(("Attach flowrings pool for %d rings\n", h2d_flowrings_total));
7195
7196 /* Allocate pool of msgbuf_ring_t objects for all flowrings */
7197 prot->h2d_flowrings_pool = (msgbuf_ring_t *)MALLOCZ(prot->osh,
7198 (h2d_flowrings_total * sizeof(msgbuf_ring_t)));
7199
7200 if (prot->h2d_flowrings_pool == NULL) {
7201 DHD_ERROR(("%s: flowrings pool for %d flowrings, alloc failure\n",
7202 __FUNCTION__, h2d_flowrings_total));
7203 goto fail;
7204 }
7205
7206 /* Setup & Attach a DMA-able buffer to each flowring in the flowring pool */
7207 FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
7208 snprintf(ring_name, sizeof(ring_name), "h2dflr_%03u", flowid);
7209 if (dhd_prot_ring_attach(dhd, ring, ring_name,
7210 prot->h2d_max_txpost, H2DRING_TXPOST_ITEMSIZE,
7211 DHD_FLOWID_TO_RINGID(flowid)) != BCME_OK) {
7212 goto attach_fail;
7213 }
7214 }
7215
7216 return BCME_OK;
7217
7218 attach_fail:
7219 dhd_prot_flowrings_pool_detach(dhd); /* Free entire pool of flowrings */
7220
7221 fail:
7222 prot->h2d_rings_total = 0;
7223 return BCME_NOMEM;
7224
7225 } /* dhd_prot_flowrings_pool_attach */
7226
7227 /**
7228 * dhd_prot_flowrings_pool_reset - Reset all msgbuf_ring_t objects in the pool.
7229 * Invokes dhd_prot_ring_reset to perform the actual reset.
7230 *
7231 * The DMA-able buffer is not freed during reset and neither is the flowring
7232 * pool freed.
7233 *
7234 * dhd_prot_flowrings_pool_reset will be invoked in dhd_prot_reset. Following
7235 * the dhd_prot_reset, dhd_prot_init will be re-invoked, and the flowring pool
7236 * from a previous flowring pool instantiation will be reused.
7237 *
7238 * This will avoid a fragmented DMA-able memory condition, if multiple
7239 * dhd_prot_reset were invoked to reboot the dongle without a full detach/attach
7240 * cycle.
7241 */
7242 static void
7243 dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd)
7244 {
7245 uint16 flowid, h2d_flowrings_total;
7246 msgbuf_ring_t *ring;
7247 dhd_prot_t *prot = dhd->prot;
7248
7249 if (prot->h2d_flowrings_pool == NULL) {
7250 ASSERT(prot->h2d_rings_total == 0);
7251 return;
7252 }
7253 h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
7254 /* Reset each flowring in the flowring pool */
7255 FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
7256 dhd_prot_ring_reset(dhd, ring);
7257 ring->inited = FALSE;
7258 }
7259
7260 /* Flowring pool state must be as-if dhd_prot_flowrings_pool_attach */
7261 }
7262
7263 /**
7264 * dhd_prot_flowrings_pool_detach - Free pool of msgbuf_ring along with
7265 * DMA-able buffers for flowrings.
7266 * dhd_prot_ring_detach is invoked to free the DMA-able buffer and perform any
7267 * de-initialization of each msgbuf_ring_t.
7268 */
7269 static void
7270 dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd)
7271 {
7272 int flowid;
7273 msgbuf_ring_t *ring;
7274 uint16 h2d_flowrings_total; /* exclude H2D common rings */
7275 dhd_prot_t *prot = dhd->prot;
7276
7277 if (prot->h2d_flowrings_pool == NULL) {
7278 ASSERT(prot->h2d_rings_total == 0);
7279 return;
7280 }
7281
7282 h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
7283 /* Detach the DMA-able buffer for each flowring in the flowring pool */
7284 FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
7285 dhd_prot_ring_detach(dhd, ring);
7286 }
7287
7288 MFREE(prot->osh, prot->h2d_flowrings_pool,
7289 (h2d_flowrings_total * sizeof(msgbuf_ring_t)));
7290
7291 prot->h2d_flowrings_pool = (msgbuf_ring_t*)NULL;
7292 prot->h2d_rings_total = 0;
7293
7294 } /* dhd_prot_flowrings_pool_detach */
7295
7296 /**
7297 * dhd_prot_flowrings_pool_fetch - Fetch a preallocated and initialized
7298 * msgbuf_ring from the flowring pool, and assign it.
7299 *
7300 * Unlike common rings, which use dhd_prot_ring_init() to pass the common
7301 * ring information to the dongle, a flowring's information is passed via a
7302 * flowring create control message.
7303 *
7304 * Only the ring state (WR, RD) index are initialized.
7305 */
7306 static msgbuf_ring_t *
7307 dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd, uint16 flowid)
7308 {
7309 msgbuf_ring_t *ring;
7310 dhd_prot_t *prot = dhd->prot;
7311
7312 ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
7313 ASSERT(flowid < prot->h2d_rings_total);
7314 ASSERT(prot->h2d_flowrings_pool != NULL);
7315
7316 ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
7317
7318 /* ASSERT flow_ring->inited == FALSE */
7319
7320 ring->wr = 0;
7321 ring->rd = 0;
7322 ring->curr_rd = 0;
7323 ring->inited = TRUE;
7324 /**
7325 * Every time a flowring starts dynamically, initialize current_phase with 0
7326 * then flip to BCMPCIE_CMNHDR_PHASE_BIT_INIT
7327 */
7328 ring->current_phase = 0;
7329 return ring;
7330 }
7331
7332 /**
7333 * dhd_prot_flowrings_pool_release - release a previously fetched flowring's
7334 * msgbuf_ring back to the flow_ring pool.
7335 */
7336 void
7337 dhd_prot_flowrings_pool_release(dhd_pub_t *dhd, uint16 flowid, void *flow_ring)
7338 {
7339 msgbuf_ring_t *ring;
7340 dhd_prot_t *prot = dhd->prot;
7341
7342 ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
7343 ASSERT(flowid < prot->h2d_rings_total);
7344 ASSERT(prot->h2d_flowrings_pool != NULL);
7345
7346 ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
7347
7348 ASSERT(ring == (msgbuf_ring_t*)flow_ring);
7349 /* ASSERT flow_ring->inited == TRUE */
7350
7351 (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
7352
7353 ring->wr = 0;
7354 ring->rd = 0;
7355 ring->inited = FALSE;
7356
7357 ring->curr_rd = 0;
7358 }
7359
7360 /* Assumes only one index is updated at a time. */
7361 /* If exactly_nitems is true, this function will allocate space for nitems or fail, */
7362 /* except when a wrap-around is encountered (the last nitems of the ring buffer), to prevent a hangup. */
7363 /* If exactly_nitems is false, this function will allocate space for nitems or less. */
7364 static void *BCMFASTPATH
7365 dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced,
7366 bool exactly_nitems)
7367 {
7368 void *ret_ptr = NULL;
7369 uint16 ring_avail_cnt;
7370
7371 ASSERT(nitems <= ring->max_items);
7372
7373 ring_avail_cnt = CHECK_WRITE_SPACE(ring->rd, ring->wr, ring->max_items);
7374
7375 if ((ring_avail_cnt == 0) ||
7376 (exactly_nitems && (ring_avail_cnt < nitems) &&
7377 ((ring->max_items - ring->wr) >= nitems))) {
7378 DHD_INFO(("Space not available: ring %s items %d write %d read %d\n",
7379 ring->name, nitems, ring->wr, ring->rd));
7380 return NULL;
7381 }
7382 *alloced = MIN(nitems, ring_avail_cnt);
7383
7384 /* Return next available space */
7385 ret_ptr = (char *)DHD_RING_BGN_VA(ring) + (ring->wr * ring->item_len);
7386
7387 /* Update write index */
7388 if ((ring->wr + *alloced) == ring->max_items)
7389 ring->wr = 0;
7390 else if ((ring->wr + *alloced) < ring->max_items)
7391 ring->wr += *alloced;
7392 else {
7393 /* Should never hit this */
7394 ASSERT(0);
7395 return NULL;
7396 }
7397
7398 return ret_ptr;
7399 } /* dhd_prot_get_ring_space */
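/*
 * Worked example (informational only) of the write-index update above, with
 * illustrative numbers: if max_items = 256, wr = 250 and CHECK_WRITE_SPACE()
 * reports 6 free slots, then a request for nitems = 8 with
 * exactly_nitems == FALSE returns *alloced = 6, the caller fills slots
 * 250..255, and wr wraps to 0 so the next producer continues from the ring
 * base.
 */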
7400
7401 /**
7402 * dhd_prot_ring_write_complete - Host updates the new WR index on producing
7403 * new messages in a H2D ring. The messages are flushed from cache prior to
7404 * posting the new WR index. The new WR index will be updated in the DMA index
7405 * array or directly in the dongle's ring state memory.
7406 * A PCIE doorbell will be generated to wake up the dongle.
7407 * This is a non-atomic function, make sure the callers
7408 * always hold appropriate locks.
7409 */
7410 static void BCMFASTPATH
7411 __dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
7412 uint16 nitems)
7413 {
7414 dhd_prot_t *prot = dhd->prot;
7415 uint32 db_index;
7416 uint16 max_flowrings = dhd->bus->max_tx_flowrings;
7417 uint corerev;
7418
7419 /* cache flush */
7420 OSL_CACHE_FLUSH(p, ring->item_len * nitems);
7421
7422 if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
7423 dhd_prot_dma_indx_set(dhd, ring->wr,
7424 H2D_DMA_INDX_WR_UPD, ring->idx);
7425 } else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings)) {
7426 dhd_prot_dma_indx_set(dhd, ring->wr,
7427 H2D_IFRM_INDX_WR_UPD, ring->idx);
7428 } else {
7429 dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
7430 sizeof(uint16), RING_WR_UPD, ring->idx);
7431 }
7432
7433 /* raise h2d interrupt */
7434 if (IDMA_ACTIVE(dhd) ||
7435 (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings))) {
7436 db_index = IDMA_IDX0;
7437 /* This API is called in the wl down path; in that case sih is already freed */
7438 if (dhd->bus->sih) {
7439 corerev = dhd->bus->sih->buscorerev;
7440 /* We need to explicitly configure the type of DMA for core rev >= 24 */
7441 if (corerev >= 24) {
7442 db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
7443 }
7444 }
7445 prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
7446 } else {
7447 prot->mb_ring_fn(dhd->bus, ring->wr);
7448 }
7449 }
7450
7451 static void BCMFASTPATH
7452 dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
7453 uint16 nitems)
7454 {
7455 unsigned long flags_bus;
7456 DHD_BUS_LOCK(dhd->bus->bus_lock, flags_bus);
7457 __dhd_prot_ring_write_complete(dhd, ring, p, nitems);
7458 DHD_BUS_UNLOCK(dhd->bus->bus_lock, flags_bus);
7459 }
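/*
 * Summary sketch (informational only) of how __dhd_prot_ring_write_complete()
 * above publishes the new WR index and rings the doorbell:
 *
 *	if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support)
 *		WR goes into the host-resident H2D DMA index array;
 *	else if (IFRM_ACTIVE(dhd) && ring is a flowring)
 *		WR goes into the IFRM index array;
 *	else
 *		WR is written directly to dongle memory (RING_WR_UPD).
 *
 *	Doorbell: IDMA, and IFRM flowrings, use mb_2_ring_fn() with an
 *	IDMA_IDX0-based value; every other case uses mb_ring_fn(ring->wr).
 */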
7460
7461 /**
7462 * dhd_prot_ring_write_complete_mbdata - will be called from dhd_prot_h2d_mbdata_send_ctrlmsg,
7463 * which will hold DHD_BUS_LOCK to update WR pointer, Ring DB and also update bus_low_power_state
7464 * to indicate D3_INFORM sent in the same BUS_LOCK.
7465 */
7466 static void BCMFASTPATH
7467 dhd_prot_ring_write_complete_mbdata(dhd_pub_t *dhd, msgbuf_ring_t * ring, void *p,
7468 uint16 nitems, uint32 mb_data)
7469 {
7470 unsigned long flags_bus;
7471
7472 DHD_BUS_LOCK(dhd->bus->bus_lock, flags_bus);
7473
7474 __dhd_prot_ring_write_complete(dhd, ring, p, nitems);
7475
7476 /* Mark D3_INFORM in the same context to skip ringing H2D DB after D3_INFORM */
7477 if (mb_data == H2D_HOST_D3_INFORM) {
7478 dhd->bus->bus_low_power_state = DHD_BUS_D3_INFORM_SENT;
7479 }
7480
7481 DHD_BUS_UNLOCK(dhd->bus->bus_lock, flags_bus);
7482 }
7483
7484 /**
7485 * dhd_prot_upd_read_idx - Host updates the new RD index on consuming messages
7486 * from a D2H ring. The new RD index will be updated in the DMA Index array or
7487 * directly in dongle's ring state memory.
7488 */
7489 static void
7490 dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring)
7491 {
7492 dhd_prot_t *prot = dhd->prot;
7493 uint32 db_index;
7494 uint corerev;
7495
7496 /* update read index */
7497 /* If DMA'ing of h2d indices is supported,
7498 * update the RD index in host memory;
7499 * otherwise update it in TCM.
7500 */
7501 if (IDMA_ACTIVE(dhd)) {
7502 dhd_prot_dma_indx_set(dhd, ring->rd,
7503 D2H_DMA_INDX_RD_UPD, ring->idx);
7504 db_index = IDMA_IDX1;
7505 if (dhd->bus->sih) {
7506 corerev = dhd->bus->sih->buscorerev;
7507 /* We need to explicitly configure the type of DMA for core rev >= 24 */
7508 if (corerev >= 24) {
7509 db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
7510 }
7511 }
7512 prot->mb_2_ring_fn(dhd->bus, db_index, FALSE);
7513 } else if (dhd->dma_h2d_ring_upd_support) {
7514 dhd_prot_dma_indx_set(dhd, ring->rd,
7515 D2H_DMA_INDX_RD_UPD, ring->idx);
7516 } else {
7517 dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
7518 sizeof(uint16), RING_RD_UPD, ring->idx);
7519 }
7520 }
7521
7522 static int
7523 dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
7524 uint16 ring_type, uint32 req_id)
7525 {
7526 unsigned long flags;
7527 d2h_ring_create_req_t *d2h_ring;
7528 uint16 alloced = 0;
7529 int ret = BCME_OK;
7530 uint16 max_h2d_rings = dhd->bus->max_submission_rings;
7531 msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
7532
7533 DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
7534
7535 DHD_TRACE(("%s trying to send D2H ring create Req\n", __FUNCTION__));
7536
7537 if (ring_to_create == NULL) {
7538 DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__));
7539 ret = BCME_ERROR;
7540 goto err;
7541 }
7542
7543 /* Request for ring buffer space */
7544 d2h_ring = (d2h_ring_create_req_t *) dhd_prot_alloc_ring_space(dhd,
7545 ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
7546 &alloced, FALSE);
7547
7548 if (d2h_ring == NULL) {
7549 DHD_ERROR(("%s: FATAL: No space in control ring to send D2H ring create\n",
7550 __FUNCTION__));
7551 ret = BCME_NOMEM;
7552 goto err;
7553 }
7554 ring_to_create->create_req_id = (uint16)req_id;
7555 ring_to_create->create_pending = TRUE;
7556
7557 /* Common msg buf hdr */
7558 d2h_ring->msg.msg_type = MSG_TYPE_D2H_RING_CREATE;
7559 d2h_ring->msg.if_id = 0;
7560 d2h_ring->msg.flags = ctrl_ring->current_phase;
7561 d2h_ring->msg.request_id = htol32(ring_to_create->create_req_id);
7562 d2h_ring->ring_id = htol16(DHD_D2H_RING_OFFSET(ring_to_create->idx, max_h2d_rings));
7563 d2h_ring->ring_type = ring_type;
7564 d2h_ring->max_items = htol16(D2HRING_DYNAMIC_INFO_MAX_ITEM);
7565 d2h_ring->len_item = htol16(D2HRING_INFO_BUFCMPLT_ITEMSIZE);
7566 d2h_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
7567 d2h_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
7568
7569 d2h_ring->flags = 0;
7570 d2h_ring->msg.epoch =
7571 ctrl_ring->seqnum % H2D_EPOCH_MODULO;
7572 ctrl_ring->seqnum++;
7573
7574 /* Update the flow_ring's WRITE index */
7575 dhd_prot_ring_write_complete(dhd, ctrl_ring, d2h_ring,
7576 DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
7577
7578 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
7579
7580 return ret;
7581 err:
7582 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
7583
7584 return ret;
7585 }
7586
7587 static int
7588 dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create, uint8 ring_type, uint32 id)
7589 {
7590 unsigned long flags;
7591 h2d_ring_create_req_t *h2d_ring;
7592 uint16 alloced = 0;
7593 uint8 i = 0;
7594 int ret = BCME_OK;
7595 msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
7596
7597 DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
7598
7599 DHD_TRACE(("%s trying to send H2D ring create Req\n", __FUNCTION__));
7600
7601 if (ring_to_create == NULL) {
7602 DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__));
7603 ret = BCME_ERROR;
7604 goto err;
7605 }
7606
7607 /* Request for ring buffer space */
7608 h2d_ring = (h2d_ring_create_req_t *)dhd_prot_alloc_ring_space(dhd,
7609 ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
7610 &alloced, FALSE);
7611
7612 if (h2d_ring == NULL) {
7613 DHD_ERROR(("%s: FATAL: No space in control ring to send H2D ring create\n",
7614 __FUNCTION__));
7615 ret = BCME_NOMEM;
7616 goto err;
7617 }
7618 ring_to_create->create_req_id = (uint16)id;
7619 ring_to_create->create_pending = TRUE;
7620
7621 /* Common msg buf hdr */
7622 h2d_ring->msg.msg_type = MSG_TYPE_H2D_RING_CREATE;
7623 h2d_ring->msg.if_id = 0;
7624 h2d_ring->msg.request_id = htol32(ring_to_create->create_req_id);
7625 h2d_ring->msg.flags = ctrl_ring->current_phase;
7626 h2d_ring->ring_id = htol16(DHD_H2D_RING_OFFSET(ring_to_create->idx));
7627 h2d_ring->ring_type = ring_type;
7628 h2d_ring->max_items = htol16(H2DRING_DYNAMIC_INFO_MAX_ITEM);
7629 h2d_ring->n_completion_ids = ring_to_create->n_completion_ids;
7630 h2d_ring->len_item = htol16(H2DRING_INFO_BUFPOST_ITEMSIZE);
7631 h2d_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
7632 h2d_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
7633
7634 for (i = 0; i < ring_to_create->n_completion_ids; i++) {
7635 h2d_ring->completion_ring_ids[i] = htol16(ring_to_create->compeltion_ring_ids[i]);
7636 }
7637
7638 h2d_ring->flags = 0;
7639 h2d_ring->msg.epoch =
7640 ctrl_ring->seqnum % H2D_EPOCH_MODULO;
7641 ctrl_ring->seqnum++;
7642
7643 /* Update the flow_ring's WRITE index */
7644 dhd_prot_ring_write_complete(dhd, ctrl_ring, h2d_ring,
7645 DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
7646
7647 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
7648
7649 return ret;
7650 err:
7651 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
7652
7653 return ret;
7654 }
7655
7656 /**
7657 * dhd_prot_dma_indx_set - set a new WR or RD index in the DMA index array.
7658 * Dongle will DMA the entire array (if DMA_INDX feature is enabled).
7659 * See dhd_prot_dma_indx_init()
7660 */
7661 void
7662 dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, uint16 ringid)
7663 {
7664 uint8 *ptr;
7665 uint16 offset;
7666 dhd_prot_t *prot = dhd->prot;
7667 uint16 max_h2d_rings = dhd->bus->max_submission_rings;
7668
7669 switch (type) {
7670 case H2D_DMA_INDX_WR_UPD:
7671 ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
7672 offset = DHD_H2D_RING_OFFSET(ringid);
7673 break;
7674
7675 case D2H_DMA_INDX_RD_UPD:
7676 ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
7677 offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
7678 break;
7679
7680 case H2D_IFRM_INDX_WR_UPD:
7681 ptr = (uint8 *)(prot->h2d_ifrm_indx_wr_buf.va);
7682 offset = DHD_H2D_FRM_FLOW_RING_OFFSET(ringid);
7683 break;
7684
7685 default:
7686 DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
7687 __FUNCTION__));
7688 return;
7689 }
7690
7691 ASSERT(prot->rw_index_sz != 0);
7692 ptr += offset * prot->rw_index_sz;
7693
7694 *(uint16*)ptr = htol16(new_index);
7695
7696 OSL_CACHE_FLUSH((void *)ptr, prot->rw_index_sz);
7697
7698 DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
7699 __FUNCTION__, new_index, type, ringid, ptr, offset));
7700
7701 } /* dhd_prot_dma_indx_set */
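/*
 * Worked example (informational only) of the index-array addressing shared by
 * dhd_prot_dma_indx_set() and dhd_prot_dma_indx_get(); the numbers are
 * illustrative. With rw_index_sz = 2 and a completion ring whose
 * DHD_D2H_RING_OFFSET() is 3, the ring's 16-bit RD index lives at byte offset
 *
 *	offset * rw_index_sz = 3 * 2 = 6
 *
 * inside d2h_dma_indx_rd_buf. The setter stores it little-endian (htol16) and
 * cache-flushes so the dongle can DMA a consistent array; the getter
 * cache-invalidates and converts back with LTOH16.
 */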
7702
7703 /**
7704 * dhd_prot_dma_indx_get - Fetch a WR or RD index from the dongle DMA-ed index
7705 * array.
7706 * Dongle DMAes an entire array to host memory (if the feature is enabled).
7707 * See dhd_prot_dma_indx_init()
7708 */
7709 static uint16
7710 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid)
7711 {
7712 uint8 *ptr;
7713 uint16 data;
7714 uint16 offset;
7715 dhd_prot_t *prot = dhd->prot;
7716 uint16 max_h2d_rings = dhd->bus->max_submission_rings;
7717
7718 switch (type) {
7719 case H2D_DMA_INDX_WR_UPD:
7720 ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
7721 offset = DHD_H2D_RING_OFFSET(ringid);
7722 break;
7723
7724 case H2D_DMA_INDX_RD_UPD:
7725 ptr = (uint8 *)(prot->h2d_dma_indx_rd_buf.va);
7726 offset = DHD_H2D_RING_OFFSET(ringid);
7727 break;
7728
7729 case D2H_DMA_INDX_WR_UPD:
7730 ptr = (uint8 *)(prot->d2h_dma_indx_wr_buf.va);
7731 offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
7732 break;
7733
7734 case D2H_DMA_INDX_RD_UPD:
7735 ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
7736 offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
7737 break;
7738
7739 default:
7740 DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
7741 __FUNCTION__));
7742 return 0;
7743 }
7744
7745 ASSERT(prot->rw_index_sz != 0);
7746 ptr += offset * prot->rw_index_sz;
7747
7748 OSL_CACHE_INV((void *)ptr, prot->rw_index_sz);
7749
7750 data = LTOH16(*((uint16*)ptr));
7751
7752 DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
7753 __FUNCTION__, data, type, ringid, ptr, offset));
7754
7755 return (data);
7756
7757 } /* dhd_prot_dma_indx_get */
7758
7759 /**
7760 * An array of DMA read/write indices, containing information about host rings, can be maintained
7761 * either in host memory or in device memory, dependent on preprocessor options. This function is,
7762 * dependent on these options, called during driver initialization. It reserves and initializes
7763 * blocks of DMA'able host memory containing an array of DMA read or DMA write indices. The physical
7764 * address of these host memory blocks are communicated to the dongle later on. By reading this host
7765 * memory, the dongle learns about the state of the host rings.
7766 */
7767
7768 static INLINE int
7769 dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
7770 dhd_dma_buf_t *dma_buf, uint32 bufsz)
7771 {
7772 int rc;
7773
7774 if ((dma_buf->len == bufsz) || (dma_buf->va != NULL))
7775 return BCME_OK;
7776
7777 rc = dhd_dma_buf_alloc(dhd, dma_buf, bufsz);
7778
7779 return rc;
7780 }
7781
7782 int
7783 dhd_prot_dma_indx_init(dhd_pub_t *dhd, uint32 rw_index_sz, uint8 type, uint32 length)
7784 {
7785 uint32 bufsz;
7786 dhd_prot_t *prot = dhd->prot;
7787 dhd_dma_buf_t *dma_buf;
7788
7789 if (prot == NULL) {
7790 DHD_ERROR(("prot is not inited\n"));
7791 return BCME_ERROR;
7792 }
7793
7794 /* Dongle advertizes 2B or 4B RW index size */
7795 ASSERT(rw_index_sz != 0);
7796 prot->rw_index_sz = rw_index_sz;
7797
7798 bufsz = rw_index_sz * length;
7799
7800 switch (type) {
7801 case H2D_DMA_INDX_WR_BUF:
7802 dma_buf = &prot->h2d_dma_indx_wr_buf;
7803 if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
7804 goto ret_no_mem;
7805 DHD_ERROR(("H2D DMA WR INDX : array size %d = %d * %d\n",
7806 dma_buf->len, rw_index_sz, length));
7807 break;
7808
7809 case H2D_DMA_INDX_RD_BUF:
7810 dma_buf = &prot->h2d_dma_indx_rd_buf;
7811 if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
7812 goto ret_no_mem;
7813 DHD_ERROR(("H2D DMA RD INDX : array size %d = %d * %d\n",
7814 dma_buf->len, rw_index_sz, length));
7815 break;
7816
7817 case D2H_DMA_INDX_WR_BUF:
7818 dma_buf = &prot->d2h_dma_indx_wr_buf;
7819 if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
7820 goto ret_no_mem;
7821 DHD_ERROR(("D2H DMA WR INDX : array size %d = %d * %d\n",
7822 dma_buf->len, rw_index_sz, length));
7823 break;
7824
7825 case D2H_DMA_INDX_RD_BUF:
7826 dma_buf = &prot->d2h_dma_indx_rd_buf;
7827 if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
7828 goto ret_no_mem;
7829 DHD_ERROR(("D2H DMA RD INDX : array size %d = %d * %d\n",
7830 dma_buf->len, rw_index_sz, length));
7831 break;
7832
7833 case H2D_IFRM_INDX_WR_BUF:
7834 dma_buf = &prot->h2d_ifrm_indx_wr_buf;
7835 if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
7836 goto ret_no_mem;
7837 DHD_ERROR(("H2D IFRM WR INDX : array size %d = %d * %d\n",
7838 dma_buf->len, rw_index_sz, length));
7839 break;
7840
7841 default:
7842 DHD_ERROR(("%s: Unexpected option\n", __FUNCTION__));
7843 return BCME_BADOPTION;
7844 }
7845
7846 return BCME_OK;
7847
7848 ret_no_mem:
7849 DHD_ERROR(("%s: dhd_prot_dma_indx_alloc type %d buf_sz %d failure\n",
7850 __FUNCTION__, type, bufsz));
7851 return BCME_NOMEM;
7852
7853 } /* dhd_prot_dma_indx_init */
7854
7855 /**
7856 * Called on checking for 'completion' messages from the dongle. Returns next host buffer to read
7857 * from, or NULL if there are no more messages to read.
7858 */
7859 static uint8*
7860 dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 *available_len)
7861 {
7862 uint16 wr;
7863 uint16 rd;
7864 uint16 depth;
7865 uint16 items;
7866 void *read_addr = NULL; /* address of next msg to be read in ring */
7867 uint16 d2h_wr = 0;
7868
7869 DHD_TRACE(("%s: d2h_dma_indx_rd_buf %p, d2h_dma_indx_wr_buf %p\n",
7870 __FUNCTION__, (uint32 *)(dhd->prot->d2h_dma_indx_rd_buf.va),
7871 (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va)));
7872
7873 /* Remember the read index in a variable.
7874 * This is because ring->rd gets updated at the end of this function,
7875 * so otherwise it would not be possible to print the exact read index
7876 * from which the message was read.
7877 */
7878 ring->curr_rd = ring->rd;
7879
7880 /* update write pointer */
7881 if (dhd->dma_d2h_ring_upd_support) {
7882 /* DMAing write/read indices supported */
7883 d2h_wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
7884 ring->wr = d2h_wr;
7885 } else {
7886 dhd_bus_cmn_readshared(dhd->bus, &(ring->wr), RING_WR_UPD, ring->idx);
7887 }
7888
7889 wr = ring->wr;
7890 rd = ring->rd;
7891 depth = ring->max_items;
7892
7893 /* check for avail space, in number of ring items */
7894 items = READ_AVAIL_SPACE(wr, rd, depth);
7895 if (items == 0)
7896 return NULL;
7897
7898 /*
7899 * Note that there are builds where ASSERT translates to just a printk,
7900 * so even if we hit this condition we would never halt, and
7901 * dhd_prot_process_msgtype can then get into a big loop if this
7902 * happens.
7903 */
7904 if (items > ring->max_items) {
7905 DHD_ERROR(("\r\n======================= \r\n"));
7906 DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
7907 __FUNCTION__, ring, ring->name, ring->max_items, items));
7908 DHD_ERROR(("wr: %d, rd: %d, depth: %d \r\n", wr, rd, depth));
7909 DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n",
7910 dhd->busstate, dhd->bus->wait_for_d3_ack));
7911 DHD_ERROR(("\r\n======================= \r\n"));
7912 #ifdef SUPPORT_LINKDOWN_RECOVERY
7913 if (wr >= ring->max_items) {
7914 dhd->bus->read_shm_fail = TRUE;
7915 }
7916 #else
7917 #ifdef DHD_FW_COREDUMP
7918 if (dhd->memdump_enabled) {
7919 /* collect core dump */
7920 dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR;
7921 dhd_bus_mem_dump(dhd);
7922
7923 }
7924 #endif /* DHD_FW_COREDUMP */
7925 #endif /* SUPPORT_LINKDOWN_RECOVERY */
7926
7927 *available_len = 0;
7928 dhd_schedule_reset(dhd);
7929
7930 return NULL;
7931 }
7932
7933 /* if space is available, calculate address to be read */
7934 read_addr = (char*)ring->dma_buf.va + (rd * ring->item_len);
7935
7936 /* update read pointer */
7937 if ((ring->rd + items) >= ring->max_items)
7938 ring->rd = 0;
7939 else
7940 ring->rd += items;
7941
7942 ASSERT(ring->rd < ring->max_items);
7943
7944 /* convert items to bytes : available_len must be 32bits */
7945 *available_len = (uint32)(items * ring->item_len);
7946
7947 OSL_CACHE_INV(read_addr, *available_len);
7948
7949 /* return read address */
7950 return read_addr;
7951
7952 } /* dhd_prot_get_read_addr */
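/*
 * Worked example (informational only) of the read-side arithmetic above, with
 * illustrative numbers: depth = 256, rd = 250, wr = 4. Only the contiguous
 * chunk up to the end of the ring is handed out in one call, so items = 6,
 * read_addr points at slot 250, *available_len = 6 * item_len and ring->rd
 * wraps to 0; the remaining 4 items at the ring base are returned by the next
 * call.
 */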
7953
7954 /**
7955 * dhd_prot_h2d_mbdata_send_ctrlmsg is a non-atomic function,
7956 * make sure the callers always hold appropriate locks.
7957 */
7958 int dhd_prot_h2d_mbdata_send_ctrlmsg(dhd_pub_t *dhd, uint32 mb_data)
7959 {
7960 h2d_mailbox_data_t *h2d_mb_data;
7961 uint16 alloced = 0;
7962 msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
7963 unsigned long flags;
7964 int num_post = 1;
7965 int i;
7966
7967 DHD_INFO(("%s Sending H2D MB data Req data 0x%04x\n",
7968 __FUNCTION__, mb_data));
7969 if (!ctrl_ring->inited) {
7970 DHD_ERROR(("%s: Ctrl Submit Ring: not inited\n", __FUNCTION__));
7971 return BCME_ERROR;
7972 }
7973
7974 for (i = 0; i < num_post; i ++) {
7975 DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
7976 /* Request for ring buffer space */
7977 h2d_mb_data = (h2d_mailbox_data_t *)dhd_prot_alloc_ring_space(dhd,
7978 ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
7979 &alloced, FALSE);
7980
7981 if (h2d_mb_data == NULL) {
7982 DHD_ERROR(("%s: FATAL: No space in control ring to send H2D Mb data\n",
7983 __FUNCTION__));
7984 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
7985 return BCME_NOMEM;
7986 }
7987
7988 memset(h2d_mb_data, 0, sizeof(h2d_mailbox_data_t));
7989 /* Common msg buf hdr */
7990 h2d_mb_data->msg.msg_type = MSG_TYPE_H2D_MAILBOX_DATA;
7991 h2d_mb_data->msg.flags = ctrl_ring->current_phase;
7992
7993 h2d_mb_data->msg.epoch =
7994 ctrl_ring->seqnum % H2D_EPOCH_MODULO;
7995 ctrl_ring->seqnum++;
7996
7997 /* Update the mailbox data */
7998 h2d_mb_data->mail_box_data = htol32(mb_data);
8002
8003 DHD_INFO(("%s Send H2D MB data Req data 0x%04x\n", __FUNCTION__, mb_data));
8004
8005 /* upd wrt ptr and raise interrupt */
8006 dhd_prot_ring_write_complete_mbdata(dhd, ctrl_ring, h2d_mb_data,
8007 DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, mb_data);
8008
8009 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
8010
8011 }
8012 return 0;
8013 }
8014
8015 /** Creates a flow ring and informs dongle of this event */
8016 int
8017 dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
8018 {
8019 tx_flowring_create_request_t *flow_create_rqst;
8020 msgbuf_ring_t *flow_ring;
8021 dhd_prot_t *prot = dhd->prot;
8022 unsigned long flags;
8023 uint16 alloced = 0;
8024 msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
8025 uint16 max_flowrings = dhd->bus->max_tx_flowrings;
8026
8027 /* Fetch a pre-initialized msgbuf_ring from the flowring pool */
8028 flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
8029 if (flow_ring == NULL) {
8030 DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
8031 __FUNCTION__, flow_ring_node->flowid));
8032 return BCME_NOMEM;
8033 }
8034
8035 DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
8036
8037 /* Request for ctrl_ring buffer space */
8038 flow_create_rqst = (tx_flowring_create_request_t *)
8039 dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);
8040
8041 if (flow_create_rqst == NULL) {
8042 dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
8043 DHD_ERROR(("%s: Flow Create Req flowid %d - failure ring space\n",
8044 __FUNCTION__, flow_ring_node->flowid));
8045 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
8046 return BCME_NOMEM;
8047 }
8048
8049 flow_ring_node->prot_info = (void *)flow_ring;
8050
8051 /* Common msg buf hdr */
8052 flow_create_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_CREATE;
8053 flow_create_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
8054 flow_create_rqst->msg.request_id = htol32(0); /* TBD */
8055 flow_create_rqst->msg.flags = ctrl_ring->current_phase;
8056
8057 flow_create_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
8058 ctrl_ring->seqnum++;
8059
8060 /* Update flow create message */
8061 flow_create_rqst->tid = flow_ring_node->flow_info.tid;
8062 flow_create_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
8063 memcpy(flow_create_rqst->sa, flow_ring_node->flow_info.sa, sizeof(flow_create_rqst->sa));
8064 memcpy(flow_create_rqst->da, flow_ring_node->flow_info.da, sizeof(flow_create_rqst->da));
8065 /* CAUTION: ring::base_addr already in Little Endian */
8066 flow_create_rqst->flow_ring_ptr.low_addr = flow_ring->base_addr.low_addr;
8067 flow_create_rqst->flow_ring_ptr.high_addr = flow_ring->base_addr.high_addr;
8068 flow_create_rqst->max_items = htol16(prot->h2d_max_txpost);
8069 flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE);
8070
8071 /* Definition for ifrm mask: bit0: d11ac core, bit1: d11ad core.
8072 * Currently it is not used for priority, so it is used solely as the ifrm mask.
8073 */
8074 if (IFRM_ACTIVE(dhd))
8075 flow_create_rqst->priority_ifrmmask = (1 << IFRM_DEV_0);
8076
8077 DHD_ERROR(("%s: Send Flow Create Req flow ID %d for peer " MACDBG
8078 " prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid,
8079 MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid,
8080 flow_ring_node->flow_info.ifindex));
8081
8082 /* Update the flow_ring's WRITE index */
8083 if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
8084 dhd_prot_dma_indx_set(dhd, flow_ring->wr,
8085 H2D_DMA_INDX_WR_UPD, flow_ring->idx);
8086 } else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(flow_ring->idx, max_flowrings)) {
8087 dhd_prot_dma_indx_set(dhd, flow_ring->wr,
8088 H2D_IFRM_INDX_WR_UPD, flow_ring->idx);
8089 } else {
8090 dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
8091 sizeof(uint16), RING_WR_UPD, flow_ring->idx);
8092 }
8093
8094 /* update control subn ring's WR index and ring doorbell to dongle */
8095 dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_create_rqst, 1);
8096
8097 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
8098
8099 return BCME_OK;
8100 } /* dhd_prot_flow_ring_create */
8101
8102 /** called on receiving MSG_TYPE_FLOW_RING_CREATE_CMPLT message from dongle */
8103 static void
8104 dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg)
8105 {
8106 tx_flowring_create_response_t *flow_create_resp = (tx_flowring_create_response_t *)msg;
8107
8108 DHD_ERROR(("%s: Flow Create Response status = %d Flow %d\n", __FUNCTION__,
8109 ltoh16(flow_create_resp->cmplt.status),
8110 ltoh16(flow_create_resp->cmplt.flow_ring_id)));
8111
8112 dhd_bus_flow_ring_create_response(dhd->bus,
8113 ltoh16(flow_create_resp->cmplt.flow_ring_id),
8114 ltoh16(flow_create_resp->cmplt.status));
8115 }
8116
8117 static void
8118 dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf)
8119 {
8120 h2d_ring_create_response_t *resp = (h2d_ring_create_response_t *)buf;
8121 DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__,
8122 ltoh16(resp->cmplt.status),
8123 ltoh16(resp->cmplt.ring_id),
8124 ltoh32(resp->cmn_hdr.request_id)));
8125 if ((ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_DBGRING_REQ_PKTID) &&
8126 (ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_BTLOGRING_REQ_PKTID)) {
8127 DHD_ERROR(("invalid request ID with h2d ring create complete\n"));
8128 return;
8129 }
8130 if (dhd->prot->h2dring_info_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id) &&
8131 !dhd->prot->h2dring_info_subn->create_pending) {
8132 DHD_ERROR(("info ring create status for not pending submit ring\n"));
8133 }
8134
8135 if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
8136 DHD_ERROR(("info/btlog ring create failed with status %d\n",
8137 ltoh16(resp->cmplt.status)));
8138 return;
8139 }
8140 if (dhd->prot->h2dring_info_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id)) {
8141 dhd->prot->h2dring_info_subn->create_pending = FALSE;
8142 dhd->prot->h2dring_info_subn->inited = TRUE;
8143 DHD_ERROR(("info buffer post after ring create\n"));
8144 dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn);
8145 }
8146 }
8147
8148 static void
8149 dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf)
8150 {
8151 d2h_ring_create_response_t *resp = (d2h_ring_create_response_t *)buf;
8152 DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__,
8153 ltoh16(resp->cmplt.status),
8154 ltoh16(resp->cmplt.ring_id),
8155 ltoh32(resp->cmn_hdr.request_id)));
8156 if ((ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_DBGRING_REQ_PKTID) &&
8157 (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_BTLOGRING_REQ_PKTID)) {
8158 DHD_ERROR(("invalid request ID with d2h ring create complete\n"));
8159 return;
8160 }
8161 if (ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_DBGRING_REQ_PKTID) {
8162 if (!dhd->prot->d2hring_info_cpln->create_pending) {
8163 DHD_ERROR(("info ring create status for not pending cpl ring\n"));
8164 return;
8165 }
8166
8167 if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
8168 DHD_ERROR(("info cpl ring create failed with status %d\n",
8169 ltoh16(resp->cmplt.status)));
8170 return;
8171 }
8172 dhd->prot->d2hring_info_cpln->create_pending = FALSE;
8173 dhd->prot->d2hring_info_cpln->inited = TRUE;
8174 }
8175 }
8176
8177 static void
8178 dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf)
8179 {
8180 d2h_mailbox_data_t *d2h_data;
8181
8182 d2h_data = (d2h_mailbox_data_t *)buf;
8183 DHD_INFO(("%s dhd_prot_process_d2h_mb_data, 0x%04x\n", __FUNCTION__,
8184 d2h_data->d2h_mailbox_data));
8185 dhd_bus_handle_mb_data(dhd->bus, d2h_data->d2h_mailbox_data);
8186 }
8187
8188 static void
8189 dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf)
8190 {
8191 DHD_ERROR(("Timesync feature not compiled in but GOT HOST_TS_COMPLETE\n"));
8192
8193 }
8194
8195 /** called on e.g. flow ring delete */
8196 void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info)
8197 {
8198 msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
8199 dhd_prot_ring_detach(dhd, flow_ring);
8200 DHD_INFO(("%s Cleaning up Flow \n", __FUNCTION__));
8201 }
8202
8203 void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info,
8204 struct bcmstrbuf *strbuf, const char * fmt)
8205 {
8206 const char *default_fmt =
8207 "RD %d WR %d BASE(VA) %p BASE(PA) %x:%x SIZE %d "
8208 "WORK_ITEM_SIZE %d MAX_WORK_ITEMS %d TOTAL_SIZE %d\n";
8209 msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
8210 uint16 rd, wr;
8211 uint32 dma_buf_len = flow_ring->max_items * flow_ring->item_len;
8212
8213 if (fmt == NULL) {
8214 fmt = default_fmt;
8215 }
8216
8217 if (dhd->bus->is_linkdown) {
8218 DHD_ERROR(("%s: Skip dumping flowring due to Link down\n", __FUNCTION__));
8219 return;
8220 }
8221
8222 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
8223 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
8224 bcm_bprintf(strbuf, fmt, rd, wr, flow_ring->dma_buf.va,
8225 ltoh32(flow_ring->base_addr.high_addr),
8226 ltoh32(flow_ring->base_addr.low_addr),
8227 flow_ring->item_len, flow_ring->max_items,
8228 dma_buf_len);
8229 }
8230
8231 void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
8232 {
8233 dhd_prot_t *prot = dhd->prot;
8234 bcm_bprintf(strbuf, "IPCrevs: Dev %d, \t Host %d, \tactive %d\n",
8235 dhd->prot->device_ipc_version,
8236 dhd->prot->host_ipc_version,
8237 dhd->prot->active_ipc_version);
8238
8239 bcm_bprintf(strbuf, "max Host TS bufs to post: %d, \t posted %d \n",
8240 dhd->prot->max_tsbufpost, dhd->prot->cur_ts_bufs_posted);
8241 bcm_bprintf(strbuf, "max INFO bufs to post: %d, \t posted %d \n",
8242 dhd->prot->max_infobufpost, dhd->prot->infobufpost);
8243 bcm_bprintf(strbuf, "max event bufs to post: %d, \t posted %d \n",
8244 dhd->prot->max_eventbufpost, dhd->prot->cur_event_bufs_posted);
8245 bcm_bprintf(strbuf, "max ioctlresp bufs to post: %d, \t posted %d \n",
8246 dhd->prot->max_ioctlrespbufpost, dhd->prot->cur_ioctlresp_bufs_posted);
8247 bcm_bprintf(strbuf, "max RX bufs to post: %d, \t posted %d \n",
8248 dhd->prot->max_rxbufpost, dhd->prot->rxbufpost);
8249
8250 bcm_bprintf(strbuf,
8251 "%14s %5s %5s %17s %17s %14s %14s %10s\n",
8252 "Type", "RD", "WR", "BASE(VA)", "BASE(PA)",
8253 "WORK_ITEM_SIZE", "MAX_WORK_ITEMS", "TOTAL_SIZE");
8254 bcm_bprintf(strbuf, "%14s", "H2DCtrlPost");
8255 dhd_prot_print_flow_ring(dhd, &prot->h2dring_ctrl_subn, strbuf,
8256 " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
8257 bcm_bprintf(strbuf, "%14s", "D2HCtrlCpl");
8258 dhd_prot_print_flow_ring(dhd, &prot->d2hring_ctrl_cpln, strbuf,
8259 " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
8260 bcm_bprintf(strbuf, "%14s", "H2DRxPost", prot->rxbufpost);
8261 dhd_prot_print_flow_ring(dhd, &prot->h2dring_rxp_subn, strbuf,
8262 " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
8263 bcm_bprintf(strbuf, "%14s", "D2HRxCpl");
8264 dhd_prot_print_flow_ring(dhd, &prot->d2hring_rx_cpln, strbuf,
8265 " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
8266 bcm_bprintf(strbuf, "%14s", "D2HTxCpl");
8267 dhd_prot_print_flow_ring(dhd, &prot->d2hring_tx_cpln, strbuf,
8268 " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
8269 if (dhd->prot->h2dring_info_subn != NULL && dhd->prot->d2hring_info_cpln != NULL) {
8270 bcm_bprintf(strbuf, "%14s", "H2DRingInfoSub");
8271 dhd_prot_print_flow_ring(dhd, prot->h2dring_info_subn, strbuf,
8272 " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
8273 bcm_bprintf(strbuf, "%14s", "D2HRingInfoCpl");
8274 dhd_prot_print_flow_ring(dhd, prot->d2hring_info_cpln, strbuf,
8275 " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
8276 }
8277
8278 bcm_bprintf(strbuf, "active_tx_count %d pktidmap_avail(ctrl/rx/tx) %d %d %d\n",
8279 OSL_ATOMIC_READ(dhd->osh, &dhd->prot->active_tx_count),
8280 DHD_PKTID_AVAIL(dhd->prot->pktid_ctrl_map),
8281 DHD_PKTID_AVAIL(dhd->prot->pktid_rx_map),
8282 DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map));
8283
8284 }
8285
8286 int
8287 dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
8288 {
8289 tx_flowring_delete_request_t *flow_delete_rqst;
8290 dhd_prot_t *prot = dhd->prot;
8291 unsigned long flags;
8292 uint16 alloced = 0;
8293 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
8294
8295 DHD_RING_LOCK(ring->ring_lock, flags);
8296
8297 /* Request for ring buffer space */
8298 flow_delete_rqst = (tx_flowring_delete_request_t *)
8299 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
8300
8301 if (flow_delete_rqst == NULL) {
8302 DHD_RING_UNLOCK(ring->ring_lock, flags);
8303 DHD_ERROR(("%s: Flow Delete Req - failure ring space\n", __FUNCTION__));
8304 return BCME_NOMEM;
8305 }
8306
8307 /* Common msg buf hdr */
8308 flow_delete_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_DELETE;
8309 flow_delete_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
8310 flow_delete_rqst->msg.request_id = htol32(0); /* TBD */
8311 flow_delete_rqst->msg.flags = ring->current_phase;
8312
8313 flow_delete_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
8314 ring->seqnum++;
8315
8316 /* Update Delete info */
8317 flow_delete_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
8318 flow_delete_rqst->reason = htol16(BCME_OK);
8319
8320 DHD_ERROR(("%s: Send Flow Delete Req RING ID %d for peer " MACDBG
8321 " prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid,
8322 MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid,
8323 flow_ring_node->flow_info.ifindex));
8324
8325 /* update ring's WR index and ring doorbell to dongle */
8326 dhd_prot_ring_write_complete(dhd, ring, flow_delete_rqst, 1);
8327
8328 DHD_RING_UNLOCK(ring->ring_lock, flags);
8329
8330 return BCME_OK;
8331 }
8332
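/*
 * "Fast delete" helper: walk the flow ring backwards from the host write
 * index to the dongle-reported read index and synthesize a tx completion
 * for every work item the dongle never consumed, so the associated packets
 * and packet ids are reclaimed immediately.
 */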
8333 static void BCMFASTPATH
8334 dhd_prot_flow_ring_fastdelete(dhd_pub_t *dhd, uint16 flowid, uint16 rd_idx)
8335 {
8336 flow_ring_node_t *flow_ring_node = DHD_FLOW_RING(dhd, flowid);
8337 msgbuf_ring_t *ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
8338 host_txbuf_cmpl_t txstatus;
8339 host_txbuf_post_t *txdesc;
8340 uint16 wr_idx;
8341
8342 DHD_INFO(("%s: FAST delete ring, flowid=%d, rd_idx=%d, wr_idx=%d\n",
8343 __FUNCTION__, flowid, rd_idx, ring->wr));
8344
8345 memset(&txstatus, 0, sizeof(txstatus));
8346 txstatus.compl_hdr.flow_ring_id = flowid;
8347 txstatus.cmn_hdr.if_id = flow_ring_node->flow_info.ifindex;
8348 wr_idx = ring->wr;
8349
8350 while (wr_idx != rd_idx) {
8351 if (wr_idx)
8352 wr_idx--;
8353 else
8354 wr_idx = ring->max_items - 1;
8355 txdesc = (host_txbuf_post_t *)((char *)DHD_RING_BGN_VA(ring) +
8356 (wr_idx * ring->item_len));
8357 txstatus.cmn_hdr.request_id = txdesc->cmn_hdr.request_id;
8358 dhd_prot_txstatus_process(dhd, &txstatus);
8359 }
8360 }
8361
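/*
 * Handle the flow ring delete completion from the dongle: optionally reclaim
 * unprocessed tx items via the fast-delete path, then notify the bus layer.
 */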
8362 static void
8363 dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg)
8364 {
8365 tx_flowring_delete_response_t *flow_delete_resp = (tx_flowring_delete_response_t *)msg;
8366
8367 DHD_ERROR(("%s: Flow Delete Response status = %d Flow %d\n", __FUNCTION__,
8368 flow_delete_resp->cmplt.status, flow_delete_resp->cmplt.flow_ring_id));
8369
8370 if (dhd->fast_delete_ring_support) {
8371 dhd_prot_flow_ring_fastdelete(dhd, flow_delete_resp->cmplt.flow_ring_id,
8372 flow_delete_resp->read_idx);
8373 }
8374 dhd_bus_flow_ring_delete_response(dhd->bus, flow_delete_resp->cmplt.flow_ring_id,
8375 flow_delete_resp->cmplt.status);
8376 }
8377
8378 static void
8379 dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg)
8380 {
8381 #ifdef IDLE_TX_FLOW_MGMT
8382 tx_idle_flowring_resume_response_t *flow_resume_resp =
8383 (tx_idle_flowring_resume_response_t *)msg;
8384
8385 DHD_ERROR(("%s Flow resume Response status = %d Flow %d\n", __FUNCTION__,
8386 flow_resume_resp->cmplt.status, flow_resume_resp->cmplt.flow_ring_id));
8387
8388 dhd_bus_flow_ring_resume_response(dhd->bus, flow_resume_resp->cmplt.flow_ring_id,
8389 flow_resume_resp->cmplt.status);
8390 #endif /* IDLE_TX_FLOW_MGMT */
8391 }
8392
8393 static void
8394 dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg)
8395 {
8396 #ifdef IDLE_TX_FLOW_MGMT
8397 int16 status;
8398 tx_idle_flowring_suspend_response_t *flow_suspend_resp =
8399 (tx_idle_flowring_suspend_response_t *)msg;
8400 status = flow_suspend_resp->cmplt.status;
8401
8402 DHD_ERROR(("%s Flow id %d suspend Response status = %d\n",
8403 __FUNCTION__, flow_suspend_resp->cmplt.flow_ring_id,
8404 status));
8405
8406 if (status != BCME_OK) {
8407
8408 		DHD_ERROR(("%s: Error in suspending flow rings!!"
8409 			" Dongle will still be polling idle rings!! Status = %d\n",
8410 __FUNCTION__, status));
8411 }
8412 #endif /* IDLE_TX_FLOW_MGMT */
8413 }
8414
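/*
 * Post a MSG_TYPE_FLOW_RING_FLUSH request for the given flow ring on the
 * H2D control submission ring. The flush completion is reported back through
 * dhd_prot_flow_ring_flush_response_process().
 */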
8415 int
8416 dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
8417 {
8418 tx_flowring_flush_request_t *flow_flush_rqst;
8419 dhd_prot_t *prot = dhd->prot;
8420 unsigned long flags;
8421 uint16 alloced = 0;
8422 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
8423
8424 DHD_RING_LOCK(ring->ring_lock, flags);
8425
8426 /* Request for ring buffer space */
8427 flow_flush_rqst = (tx_flowring_flush_request_t *)
8428 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
8429 if (flow_flush_rqst == NULL) {
8430 DHD_RING_UNLOCK(ring->ring_lock, flags);
8431 DHD_ERROR(("%s: Flow Flush Req - failure ring space\n", __FUNCTION__));
8432 return BCME_NOMEM;
8433 }
8434
8435 /* Common msg buf hdr */
8436 flow_flush_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_FLUSH;
8437 flow_flush_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
8438 flow_flush_rqst->msg.request_id = htol32(0); /* TBD */
8439 flow_flush_rqst->msg.flags = ring->current_phase;
8440 flow_flush_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
8441 ring->seqnum++;
8442
8443 flow_flush_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
8444 flow_flush_rqst->reason = htol16(BCME_OK);
8445
8446 DHD_INFO(("%s: Send Flow Flush Req\n", __FUNCTION__));
8447
8448 /* update ring's WR index and ring doorbell to dongle */
8449 dhd_prot_ring_write_complete(dhd, ring, flow_flush_rqst, 1);
8450
8451 DHD_RING_UNLOCK(ring->ring_lock, flags);
8452
8453 return BCME_OK;
8454 } /* dhd_prot_flow_ring_flush */
8455
8456 static void
8457 dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg)
8458 {
8459 tx_flowring_flush_response_t *flow_flush_resp = (tx_flowring_flush_response_t *)msg;
8460
8461 DHD_INFO(("%s: Flow Flush Response status = %d\n", __FUNCTION__,
8462 flow_flush_resp->cmplt.status));
8463
8464 dhd_bus_flow_ring_flush_response(dhd->bus, flow_flush_resp->cmplt.flow_ring_id,
8465 flow_flush_resp->cmplt.status);
8466 }
8467
8468 /**
8469 * Request dongle to configure soft doorbells for D2H rings. Host populated soft
8470 * doorbell information is transferred to dongle via the d2h ring config control
8471 * message.
8472 */
8473 void
8474 dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd)
8475 {
8476 #if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
8477 uint16 ring_idx;
8478 uint8 *msg_next;
8479 void *msg_start;
8480 uint16 alloced = 0;
8481 unsigned long flags;
8482 dhd_prot_t *prot = dhd->prot;
8483 ring_config_req_t *ring_config_req;
8484 bcmpcie_soft_doorbell_t *soft_doorbell;
8485 msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
8486 const uint16 d2h_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
8487
8488 /* Claim space for d2h_ring number of d2h_ring_config_req_t messages */
8489 DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
8490 msg_start = dhd_prot_alloc_ring_space(dhd, ctrl_ring, d2h_rings, &alloced, TRUE);
8491
8492 if (msg_start == NULL) {
8493 DHD_ERROR(("%s Msgbuf no space for %d D2H ring config soft doorbells\n",
8494 __FUNCTION__, d2h_rings));
8495 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
8496 return;
8497 }
8498
8499 msg_next = (uint8*)msg_start;
8500
8501 for (ring_idx = 0; ring_idx < d2h_rings; ring_idx++) {
8502
8503 /* position the ring_config_req into the ctrl subm ring */
8504 ring_config_req = (ring_config_req_t *)msg_next;
8505
8506 /* Common msg header */
8507 ring_config_req->msg.msg_type = MSG_TYPE_D2H_RING_CONFIG;
8508 ring_config_req->msg.if_id = 0;
8509 ring_config_req->msg.flags = 0;
8510
8511 ring_config_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
8512 ctrl_ring->seqnum++;
8513
8514 ring_config_req->msg.request_id = htol32(DHD_FAKE_PKTID); /* unused */
8515
8516 /* Ring Config subtype and d2h ring_id */
8517 ring_config_req->subtype = htol16(D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL);
8518 ring_config_req->ring_id = htol16(DHD_D2H_RINGID(ring_idx));
8519
8520 /* Host soft doorbell configuration */
8521 soft_doorbell = &prot->soft_doorbell[ring_idx];
8522
8523 ring_config_req->soft_doorbell.value = htol32(soft_doorbell->value);
8524 ring_config_req->soft_doorbell.haddr.high =
8525 htol32(soft_doorbell->haddr.high);
8526 ring_config_req->soft_doorbell.haddr.low =
8527 htol32(soft_doorbell->haddr.low);
8528 ring_config_req->soft_doorbell.items = htol16(soft_doorbell->items);
8529 ring_config_req->soft_doorbell.msecs = htol16(soft_doorbell->msecs);
8530
8531 DHD_INFO(("%s: Soft doorbell haddr 0x%08x 0x%08x value 0x%08x\n",
8532 __FUNCTION__, ring_config_req->soft_doorbell.haddr.high,
8533 ring_config_req->soft_doorbell.haddr.low,
8534 ring_config_req->soft_doorbell.value));
8535
8536 msg_next = msg_next + ctrl_ring->item_len;
8537 }
8538
8539 /* update control subn ring's WR index and ring doorbell to dongle */
8540 dhd_prot_ring_write_complete(dhd, ctrl_ring, msg_start, d2h_rings);
8541
8542 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
8543
8544 #endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
8545 }
8546
8547 static void
8548 dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg)
8549 {
8550 DHD_INFO(("%s: Ring Config Response - status %d ringid %d\n",
8551 __FUNCTION__, ltoh16(((ring_config_resp_t *)msg)->compl_hdr.status),
8552 ltoh16(((ring_config_resp_t *)msg)->compl_hdr.flow_ring_id)));
8553 }
8554
8555 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
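/*
 * Copy the ARM trap signature (register snapshot) out of the extended trap
 * data TLVs, if present, into the caller's trap_t.
 */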
8556 void
8557 copy_ext_trap_sig(dhd_pub_t *dhd, trap_t *tr)
8558 {
8559 uint32 *ext_data = dhd->extended_trap_data;
8560 hnd_ext_trap_hdr_t *hdr;
8561 const bcm_tlv_t *tlv;
8562
8563 if (ext_data == NULL) {
8564 return;
8565 }
8566 /* First word is original trap_data */
8567 ext_data++;
8568
8569 /* Followed by the extended trap data header */
8570 hdr = (hnd_ext_trap_hdr_t *)ext_data;
8571
8572 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
8573 if (tlv) {
8574 memcpy(tr, &tlv->data, sizeof(struct _trap_struct));
8575 }
8576 }
8577 #define TRAP_T_NAME_OFFSET(var) {#var, OFFSETOF(trap_t, var)}
8578
8579 typedef struct {
8580 char name[HANG_INFO_TRAP_T_NAME_MAX];
8581 uint32 offset;
8582 } hang_info_trap_t;
8583
8584 static hang_info_trap_t hang_info_trap_tbl[] = {
8585 {"reason", 0},
8586 {"ver", VENDOR_SEND_HANG_EXT_INFO_VER},
8587 {"stype", 0},
8588 TRAP_T_NAME_OFFSET(type),
8589 TRAP_T_NAME_OFFSET(epc),
8590 TRAP_T_NAME_OFFSET(cpsr),
8591 TRAP_T_NAME_OFFSET(spsr),
8592 TRAP_T_NAME_OFFSET(r0),
8593 TRAP_T_NAME_OFFSET(r1),
8594 TRAP_T_NAME_OFFSET(r2),
8595 TRAP_T_NAME_OFFSET(r3),
8596 TRAP_T_NAME_OFFSET(r4),
8597 TRAP_T_NAME_OFFSET(r5),
8598 TRAP_T_NAME_OFFSET(r6),
8599 TRAP_T_NAME_OFFSET(r7),
8600 TRAP_T_NAME_OFFSET(r8),
8601 TRAP_T_NAME_OFFSET(r9),
8602 TRAP_T_NAME_OFFSET(r10),
8603 TRAP_T_NAME_OFFSET(r11),
8604 TRAP_T_NAME_OFFSET(r12),
8605 TRAP_T_NAME_OFFSET(r13),
8606 TRAP_T_NAME_OFFSET(r14),
8607 TRAP_T_NAME_OFFSET(pc),
8608 {"", 0}
8609 };
8610
8611 #define TAG_TRAP_IS_STATE(tag) \
8612 ((tag == TAG_TRAP_MEMORY) || (tag == TAG_TRAP_PCIE_Q) || (tag == TAG_TRAP_WLC_STATE))
8613
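/*
 * Emit the leading fields of the hang-info string: hang reason, hang-info
 * version, the debug-dump timestamp cookie, the trap subtype and the trap
 * EPC. Fields are separated by HANG_KEY_DEL and counted against
 * HANG_FIELD_CNT_MAX.
 */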
8614 static void
8615 copy_hang_info_head(char *dest, trap_t *src, int len, int field_name,
8616 int *bytes_written, int *cnt, char *cookie)
8617 {
8618 uint8 *ptr;
8619 int remain_len;
8620 int i;
8621
8622 ptr = (uint8 *)src;
8623
8624 memset(dest, 0, len);
8625 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
8626
8627 /* hang reason, hang info ver */
8628 for (i = 0; (i < HANG_INFO_TRAP_T_SUBTYPE_IDX) && (*cnt < HANG_FIELD_CNT_MAX);
8629 i++, (*cnt)++) {
8630 if (field_name) {
8631 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
8632 *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
8633 hang_info_trap_tbl[i].name, HANG_KEY_DEL);
8634 }
8635 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
8636 *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%d%c",
8637 hang_info_trap_tbl[i].offset, HANG_KEY_DEL);
8638
8639 }
8640
8641 if (*cnt < HANG_FIELD_CNT_MAX) {
8642 if (field_name) {
8643 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
8644 *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
8645 "cookie", HANG_KEY_DEL);
8646 }
8647 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
8648 *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s%c",
8649 cookie, HANG_KEY_DEL);
8650 (*cnt)++;
8651 }
8652
8653 if (*cnt < HANG_FIELD_CNT_MAX) {
8654 if (field_name) {
8655 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
8656 *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
8657 hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].name,
8658 HANG_KEY_DEL);
8659 }
8660 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
8661 *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%08x%c",
8662 hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].offset,
8663 HANG_KEY_DEL);
8664 (*cnt)++;
8665 }
8666
8667 if (*cnt < HANG_FIELD_CNT_MAX) {
8668 if (field_name) {
8669 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
8670 *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
8671 hang_info_trap_tbl[HANG_INFO_TRAP_T_EPC_IDX].name,
8672 HANG_KEY_DEL);
8673 }
8674 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
8675 *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%08x%c",
8676 *(uint32 *)
8677 (ptr + hang_info_trap_tbl[HANG_INFO_TRAP_T_EPC_IDX].offset),
8678 HANG_KEY_DEL);
8679 (*cnt)++;
8680 }
8681 }
8682
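/*
 * Append the raw trap_t register dump (type, epc, cpsr, spsr, r0..r14, pc)
 * to the hang-info string, using HANG_RAW_DEL as the delimiter.
 */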
8683 static void
8684 copy_hang_info_trap_t(char *dest, trap_t *src, int len, int field_name,
8685 int *bytes_written, int *cnt, char *cookie)
8686 {
8687 uint8 *ptr;
8688 int remain_len;
8689 int i;
8690
8691 ptr = (uint8 *)src;
8692
8693 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
8694
8695 for (i = HANG_INFO_TRAP_T_OFFSET_IDX;
8696 (hang_info_trap_tbl[i].name[0] != 0) && (*cnt < HANG_FIELD_CNT_MAX);
8697 i++, (*cnt)++) {
8698 if (field_name) {
8699 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
8700 *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%s:",
8701 HANG_RAW_DEL, hang_info_trap_tbl[i].name);
8702 }
8703 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
8704 *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
8705 HANG_RAW_DEL, *(uint32 *)(ptr + hang_info_trap_tbl[i].offset));
8706 }
8707 }
8708
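/*
 * Append the dongle stack dump from the TAG_TRAP_STACK TLV to the hang-info
 * string, padding with zero words up to HANG_FIELD_TRAP_T_STACK_CNT_MAX so
 * the field layout stays fixed.
 */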
8709 static void
8710 copy_hang_info_stack(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
8711 {
8712 int remain_len;
8713 int i = 0;
8714 const uint32 *stack;
8715 uint32 *ext_data = dhd->extended_trap_data;
8716 hnd_ext_trap_hdr_t *hdr;
8717 const bcm_tlv_t *tlv;
8718 int remain_stack_cnt = 0;
8719 uint32 dummy_data = 0;
8720 int bigdata_key_stack_cnt = 0;
8721
8722 if (ext_data == NULL) {
8723 return;
8724 }
8725 /* First word is original trap_data */
8726 ext_data++;
8727
8728 /* Followed by the extended trap data header */
8729 hdr = (hnd_ext_trap_hdr_t *)ext_data;
8730
8731 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
8732
8733 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
8734
8735 if (tlv) {
8736 stack = (const uint32 *)tlv->data;
8737
8738 *bytes_written += scnprintf(&dest[*bytes_written], remain_len,
8739 "%08x", *(uint32 *)(stack++));
8740 (*cnt)++;
8741 if (*cnt >= HANG_FIELD_CNT_MAX) {
8742 return;
8743 }
8744 for (i = 1; i < (uint32)(tlv->len / sizeof(uint32)); i++, bigdata_key_stack_cnt++) {
8745 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
8746 			/* Key data for bigdata uses a space delimiter; raw data uses '_' */
8747 *bytes_written += scnprintf(&dest[*bytes_written], remain_len,
8748 "%c%08x",
8749 i <= HANG_INFO_BIGDATA_KEY_STACK_CNT ? HANG_KEY_DEL : HANG_RAW_DEL,
8750 *(uint32 *)(stack++));
8751
8752 (*cnt)++;
8753 if ((*cnt >= HANG_FIELD_CNT_MAX) ||
8754 (i >= HANG_FIELD_TRAP_T_STACK_CNT_MAX)) {
8755 return;
8756 }
8757 }
8758 }
8759
8760 remain_stack_cnt = HANG_FIELD_TRAP_T_STACK_CNT_MAX - i;
8761
8762 for (i = 0; i < remain_stack_cnt; i++) {
8763 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
8764 *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
8765 HANG_RAW_DEL, dummy_data);
8766 (*cnt)++;
8767 if (*cnt >= HANG_FIELD_CNT_MAX) {
8768 return;
8769 }
8770 }
8771
8772 }
8773
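/*
 * Derive the hang-info trap subtype: return the first extended-trap tag that
 * is present and is not a pure state dump (memory/PCIe-queue/WLC-state tags
 * are skipped).
 */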
8774 static void
8775 get_hang_info_trap_subtype(dhd_pub_t *dhd, uint32 *subtype)
8776 {
8777 uint32 i;
8778 uint32 *ext_data = dhd->extended_trap_data;
8779 hnd_ext_trap_hdr_t *hdr;
8780 const bcm_tlv_t *tlv;
8781
8782 /* First word is original trap_data */
8783 ext_data++;
8784
8785 /* Followed by the extended trap data header */
8786 hdr = (hnd_ext_trap_hdr_t *)ext_data;
8787
8788 	/* Scan the extended trap TLVs and use the first non-state tag as the subtype */
8789 for (i = TAG_TRAP_DEEPSLEEP; i < TAG_TRAP_LAST; i++) {
8790 tlv = bcm_parse_tlvs(hdr->data, hdr->len, i);
8791 if (tlv) {
8792 if (!TAG_TRAP_IS_STATE(i)) {
8793 *subtype = i;
8794 return;
8795 }
8796 }
8797 }
8798 }
8799
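/*
 * Append the tail of the extended trap data (the portion not covered by the
 * signature and stack TLVs) to the hang-info string as raw 32-bit words,
 * with any trailing bytes packed into one final word.
 */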
8800 static void
8801 copy_hang_info_specific(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
8802 {
8803 int remain_len;
8804 int i;
8805 const uint32 *data;
8806 uint32 *ext_data = dhd->extended_trap_data;
8807 hnd_ext_trap_hdr_t *hdr;
8808 const bcm_tlv_t *tlv;
8809 int remain_trap_data = 0;
8810 uint8 buf_u8[sizeof(uint32)] = { 0, };
8811 const uint8 *p_u8;
8812
8813 if (ext_data == NULL) {
8814 return;
8815 }
8816 /* First word is original trap_data */
8817 ext_data++;
8818
8819 /* Followed by the extended trap data header */
8820 hdr = (hnd_ext_trap_hdr_t *)ext_data;
8821
8822 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
8823 if (tlv) {
8824 		/* subtract the TLV data length plus its TLV header */
8825 remain_trap_data = (hdr->len - tlv->len - sizeof(uint16));
8826 }
8827
8828 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
8829 if (tlv) {
8830 		/* subtract the TLV data length plus its TLV header */
8831 remain_trap_data -= (tlv->len + sizeof(uint16));
8832 }
8833
8834 data = (const uint32 *)(hdr->data + (hdr->len - remain_trap_data));
8835
8836 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
8837
8838 for (i = 0; i < (uint32)(remain_trap_data / sizeof(uint32)) && *cnt < HANG_FIELD_CNT_MAX;
8839 i++, (*cnt)++) {
8840 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
8841 *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
8842 HANG_RAW_DEL, *(uint32 *)(data++));
8843 }
8844
8845 if (*cnt >= HANG_FIELD_CNT_MAX) {
8846 return;
8847 }
8848
8849 remain_trap_data -= (sizeof(uint32) * i);
8850
8851 if (remain_trap_data > sizeof(buf_u8)) {
8852 DHD_ERROR(("%s: resize remain_trap_data\n", __FUNCTION__));
8853 remain_trap_data = sizeof(buf_u8);
8854 }
8855
8856 if (remain_trap_data) {
8857 p_u8 = (const uint8 *)data;
8858 for (i = 0; i < remain_trap_data; i++) {
8859 buf_u8[i] = *(const uint8 *)(p_u8++);
8860 }
8861
8862 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
8863 *bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
8864 HANG_RAW_DEL, ltoh32_ua(buf_u8));
8865 (*cnt)++;
8866 }
8867 }
8868
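/*
 * Build dhd->hang_info for the WL_CFGVENDOR_SEND_HANG_EVENT path after a
 * dongle trap: head fields, stack dump, trap_t registers and trap-specific
 * data are appended in order until HANG_FIELD_CNT_MAX fields are filled.
 */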
8869 void
8870 copy_hang_info_trap(dhd_pub_t *dhd)
8871 {
8872 trap_t tr;
8873 int bytes_written;
8874 int trap_subtype = 0;
8875
8876 if (!dhd || !dhd->hang_info) {
8877 DHD_ERROR(("%s dhd=%p hang_info=%p\n", __FUNCTION__,
8878 dhd, (dhd ? dhd->hang_info : NULL)));
8879 return;
8880 }
8881
8882 if (!dhd->dongle_trap_occured) {
8883 DHD_ERROR(("%s: dongle_trap_occured is FALSE\n", __FUNCTION__));
8884 return;
8885 }
8886
8887 memset(&tr, 0x00, sizeof(struct _trap_struct));
8888
8889 copy_ext_trap_sig(dhd, &tr);
8890 get_hang_info_trap_subtype(dhd, &trap_subtype);
8891
8892 hang_info_trap_tbl[HANG_INFO_TRAP_T_REASON_IDX].offset = HANG_REASON_DONGLE_TRAP;
8893 hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].offset = trap_subtype;
8894
8895 bytes_written = 0;
8896 dhd->hang_info_cnt = 0;
8897 get_debug_dump_time(dhd->debug_dump_time_hang_str);
8898 copy_debug_dump_time(dhd->debug_dump_time_str, dhd->debug_dump_time_hang_str);
8899
8900 copy_hang_info_head(dhd->hang_info, &tr, VENDOR_SEND_HANG_EXT_INFO_LEN, FALSE,
8901 &bytes_written, &dhd->hang_info_cnt, dhd->debug_dump_time_hang_str);
8902
8903 	DHD_INFO(("hang info head cnt: %d len: %d data: %s\n",
8904 dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
8905
8906 clear_debug_dump_time(dhd->debug_dump_time_hang_str);
8907
8908 if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
8909 copy_hang_info_stack(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
8910 DHD_INFO(("hang info stack cnt: %d len: %d data: %s\n",
8911 dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
8912 }
8913
8914 if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
8915 copy_hang_info_trap_t(dhd->hang_info, &tr, VENDOR_SEND_HANG_EXT_INFO_LEN, FALSE,
8916 &bytes_written, &dhd->hang_info_cnt, dhd->debug_dump_time_hang_str);
8917 DHD_INFO(("hang info trap_t cnt: %d len: %d data: %s\n",
8918 dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
8919 }
8920
8921 if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
8922 copy_hang_info_specific(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
8923 DHD_INFO(("hang info specific cnt: %d len: %d data: %s\n",
8924 dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
8925 }
8926
8927 }
8928 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
8929
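/*
 * Print a protocol-level debug snapshot to the kernel log: driver/firmware
 * versions, buffer-post counters, ioctl timestamps and the RD/WR indices of
 * the common rings (both the host copies and, when the PCIe link is up, the
 * values read back from shared memory).
 */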
8930 int
8931 dhd_prot_debug_info_print(dhd_pub_t *dhd)
8932 {
8933 dhd_prot_t *prot = dhd->prot;
8934 msgbuf_ring_t *ring;
8935 uint16 rd, wr;
8936 uint32 dma_buf_len;
8937 uint64 current_time;
8938
8939 DHD_ERROR(("\n ------- DUMPING VERSION INFORMATION ------- \r\n"));
8940 DHD_ERROR(("DHD: %s\n", dhd_version));
8941 DHD_ERROR(("Firmware: %s\n", fw_version));
8942
8943 #ifdef DHD_FW_COREDUMP
8944 DHD_ERROR(("\n ------- DUMPING CONFIGURATION INFORMATION ------ \r\n"));
8945 DHD_ERROR(("memdump mode: %d\n", dhd->memdump_enabled));
8946 #endif /* DHD_FW_COREDUMP */
8947
8948 DHD_ERROR(("\n ------- DUMPING PROTOCOL INFORMATION ------- \r\n"));
8949 	DHD_ERROR(("IPCrevs: Dev %d, Host %d, active %d\n",
8950 prot->device_ipc_version,
8951 prot->host_ipc_version,
8952 prot->active_ipc_version));
8953 DHD_ERROR(("d2h_intr_method -> %s\n",
8954 dhd->bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX"));
8955 DHD_ERROR(("max Host TS bufs to post: %d, posted %d\n",
8956 prot->max_tsbufpost, prot->cur_ts_bufs_posted));
8957 DHD_ERROR(("max INFO bufs to post: %d, posted %d\n",
8958 prot->max_infobufpost, prot->infobufpost));
8959 DHD_ERROR(("max event bufs to post: %d, posted %d\n",
8960 prot->max_eventbufpost, prot->cur_event_bufs_posted));
8961 DHD_ERROR(("max ioctlresp bufs to post: %d, posted %d\n",
8962 prot->max_ioctlrespbufpost, prot->cur_ioctlresp_bufs_posted));
8963 DHD_ERROR(("max RX bufs to post: %d, posted %d\n",
8964 prot->max_rxbufpost, prot->rxbufpost));
8965 DHD_ERROR(("h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
8966 h2d_max_txpost, prot->h2d_max_txpost));
8967
8968 current_time = OSL_LOCALTIME_NS();
8969 DHD_ERROR(("current_time="SEC_USEC_FMT"\n", GET_SEC_USEC(current_time)));
8970 DHD_ERROR(("ioctl_fillup_time="SEC_USEC_FMT
8971 " ioctl_ack_time="SEC_USEC_FMT
8972 " ioctl_cmplt_time="SEC_USEC_FMT"\n",
8973 GET_SEC_USEC(prot->ioctl_fillup_time),
8974 GET_SEC_USEC(prot->ioctl_ack_time),
8975 GET_SEC_USEC(prot->ioctl_cmplt_time)));
8976
8977 DHD_ERROR(("\n ------- DUMPING IOCTL RING RD WR Pointers ------- \r\n"));
8978
8979 ring = &prot->h2dring_ctrl_subn;
8980 dma_buf_len = ring->max_items * ring->item_len;
8981 DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
8982 ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
8983 ltoh32(ring->base_addr.low_addr), dma_buf_len));
8984 DHD_ERROR(("CtrlPost: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
8985 if (dhd->bus->is_linkdown) {
8986 DHD_ERROR(("CtrlPost: From Shared Mem: RD and WR are invalid"
8987 " due to PCIe link down\r\n"));
8988 } else {
8989 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
8990 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
8991 DHD_ERROR(("CtrlPost: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
8992 }
8993 DHD_ERROR(("CtrlPost: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
8994
8995 ring = &prot->d2hring_ctrl_cpln;
8996 dma_buf_len = ring->max_items * ring->item_len;
8997 DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
8998 ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
8999 ltoh32(ring->base_addr.low_addr), dma_buf_len));
9000 DHD_ERROR(("CtrlCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
9001 if (dhd->bus->is_linkdown) {
9002 DHD_ERROR(("CtrlCpl: From Shared Mem: RD and WR are invalid"
9003 " due to PCIe link down\r\n"));
9004 } else {
9005 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
9006 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
9007 DHD_ERROR(("CtrlCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
9008 }
9009 DHD_ERROR(("CtrlCpl: Expected seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
9010
9011 ring = prot->h2dring_info_subn;
9012 if (ring) {
9013 dma_buf_len = ring->max_items * ring->item_len;
9014 DHD_ERROR(("InfoSub: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
9015 ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
9016 ltoh32(ring->base_addr.low_addr), dma_buf_len));
9017 DHD_ERROR(("InfoSub: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
9018 if (dhd->bus->is_linkdown) {
9019 DHD_ERROR(("InfoSub: From Shared Mem: RD and WR are invalid"
9020 " due to PCIe link down\r\n"));
9021 } else {
9022 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
9023 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
9024 DHD_ERROR(("InfoSub: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
9025 }
9026 DHD_ERROR(("InfoSub: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
9027 }
9028 ring = prot->d2hring_info_cpln;
9029 if (ring) {
9030 dma_buf_len = ring->max_items * ring->item_len;
9031 DHD_ERROR(("InfoCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
9032 ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
9033 ltoh32(ring->base_addr.low_addr), dma_buf_len));
9034 DHD_ERROR(("InfoCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
9035 if (dhd->bus->is_linkdown) {
9036 DHD_ERROR(("InfoCpl: From Shared Mem: RD and WR are invalid"
9037 " due to PCIe link down\r\n"));
9038 } else {
9039 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
9040 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
9041 DHD_ERROR(("InfoCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
9042 }
9043 DHD_ERROR(("InfoCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
9044 }
9045
9046 ring = &prot->d2hring_tx_cpln;
9047 if (ring) {
9048 dma_buf_len = ring->max_items * ring->item_len;
9049 DHD_ERROR(("TxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
9050 ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
9051 ltoh32(ring->base_addr.low_addr), dma_buf_len));
9052 DHD_ERROR(("TxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
9053 if (dhd->bus->is_linkdown) {
9054 DHD_ERROR(("TxCpl: From Shared Mem: RD and WR are invalid"
9055 " due to PCIe link down\r\n"));
9056 } else {
9057 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
9058 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
9059 DHD_ERROR(("TxCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
9060 }
9061 DHD_ERROR(("TxCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
9062 }
9063
9064 ring = &prot->d2hring_rx_cpln;
9065 if (ring) {
9066 dma_buf_len = ring->max_items * ring->item_len;
9067 DHD_ERROR(("RxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
9068 ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
9069 ltoh32(ring->base_addr.low_addr), dma_buf_len));
9070 DHD_ERROR(("RxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
9071 if (dhd->bus->is_linkdown) {
9072 DHD_ERROR(("RxCpl: From Shared Mem: RD and WR are invalid"
9073 " due to PCIe link down\r\n"));
9074 } else {
9075 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
9076 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
9077 DHD_ERROR(("RxCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
9078 }
9079 DHD_ERROR(("RxCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
9080 }
9081
9082 DHD_ERROR(("%s: cur_ioctlresp_bufs_posted %d cur_event_bufs_posted %d\n",
9083 __FUNCTION__, prot->cur_ioctlresp_bufs_posted, prot->cur_event_bufs_posted));
9084
9085 dhd_pcie_debug_info_dump(dhd);
9086
9087 return 0;
9088 }
9089
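/*
 * Dump the host-memory DMA index blocks used for doorbell-less ring index
 * updates: the block of read pointers covering the H2D rings (common rings
 * plus flow rings) and the block of write pointers covering the D2H
 * completion rings.
 */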
9090 int
9091 dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
9092 {
9093 uint32 *ptr;
9094 uint32 value;
9095
9096 if (dhd->prot->d2h_dma_indx_wr_buf.va) {
9097 uint32 i;
9098 uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus);
9099
9100 OSL_CACHE_INV((void *)dhd->prot->d2h_dma_indx_wr_buf.va,
9101 dhd->prot->d2h_dma_indx_wr_buf.len);
9102
9103 ptr = (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va);
9104
9105 		bcm_bprintf(b, "\n max_h2d_queues %d\n", max_h2d_queues);
9106
9107 		bcm_bprintf(b, "\nRPTR block H2D common rings, %p\n", ptr);
9108 value = ltoh32(*ptr);
9109 bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value);
9110 ptr++;
9111 value = ltoh32(*ptr);
9112 bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value);
9113
9114 ptr++;
9115 		bcm_bprintf(b, "RPTR block Flow rings, %p\n", ptr);
9116 for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) {
9117 value = ltoh32(*ptr);
9118 bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value);
9119 ptr++;
9120 }
9121 }
9122
9123 if (dhd->prot->h2d_dma_indx_rd_buf.va) {
9124 OSL_CACHE_INV((void *)dhd->prot->h2d_dma_indx_rd_buf.va,
9125 dhd->prot->h2d_dma_indx_rd_buf.len);
9126
9127 ptr = (uint32 *)(dhd->prot->h2d_dma_indx_rd_buf.va);
9128
9129 		bcm_bprintf(b, "\nWPTR block D2H common rings, %p\n", ptr);
9130 value = ltoh32(*ptr);
9131 bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value);
9132 ptr++;
9133 value = ltoh32(*ptr);
9134 bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value);
9135 ptr++;
9136 value = ltoh32(*ptr);
9137 bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value);
9138 }
9139
9140 return 0;
9141 }
9142
9143 uint32
9144 dhd_prot_metadata_dbg_set(dhd_pub_t *dhd, bool val)
9145 {
9146 dhd_prot_t *prot = dhd->prot;
9147 #if DHD_DBG_SHOW_METADATA
9148 prot->metadata_dbg = val;
9149 #endif /* DHD_DBG_SHOW_METADATA */
9150 return (uint32)prot->metadata_dbg;
9151 }
9152
9153 uint32
9154 dhd_prot_metadata_dbg_get(dhd_pub_t *dhd)
9155 {
9156 dhd_prot_t *prot = dhd->prot;
9157 return (uint32)prot->metadata_dbg;
9158 }
9159
9160 uint32
9161 dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx)
9162 {
9163 dhd_prot_t *prot = dhd->prot;
9164 if (rx)
9165 prot->rx_metadata_offset = (uint16)val;
9166 else
9167 prot->tx_metadata_offset = (uint16)val;
9168 return dhd_prot_metadatalen_get(dhd, rx);
9169 }
9170
9171 uint32
9172 dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx)
9173 {
9174 dhd_prot_t *prot = dhd->prot;
9175 if (rx)
9176 return prot->rx_metadata_offset;
9177 else
9178 return prot->tx_metadata_offset;
9179 }
9180
9181 /** optimization to write "n" tx items at a time to ring */
9182 uint32
9183 dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val)
9184 {
9185 dhd_prot_t *prot = dhd->prot;
9186 if (set)
9187 prot->txp_threshold = (uint16)val;
9188 val = prot->txp_threshold;
9189 return val;
9190 }
9191
9192 #ifdef DHD_RX_CHAINING
9193
9194 static INLINE void BCMFASTPATH
9195 dhd_rxchain_reset(rxchain_info_t *rxchain)
9196 {
9197 rxchain->pkt_count = 0;
9198 }
9199
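/*
 * Rx chaining: accumulate received packets belonging to the same flow (same
 * SA/DA/priority on a chainable interface) into a PKTC chain and hand the
 * whole chain up at once, committing early whenever the flow changes, the
 * packet is not chainable, or DHD_PKT_CTF_MAX_CHAIN_LEN is reached.
 */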
9200 static void BCMFASTPATH
9201 dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx)
9202 {
9203 uint8 *eh;
9204 uint8 prio;
9205 dhd_prot_t *prot = dhd->prot;
9206 rxchain_info_t *rxchain = &prot->rxchain;
9207
9208 ASSERT(!PKTISCHAINED(pkt));
9209 ASSERT(PKTCLINK(pkt) == NULL);
9210 ASSERT(PKTCGETATTR(pkt) == 0);
9211
9212 eh = PKTDATA(dhd->osh, pkt);
9213 prio = IP_TOS46(eh + ETHER_HDR_LEN) >> IPV4_TOS_PREC_SHIFT;
9214
9215 if (rxchain->pkt_count && !(PKT_CTF_CHAINABLE(dhd, ifidx, eh, prio, rxchain->h_sa,
9216 rxchain->h_da, rxchain->h_prio))) {
9217 /* Different flow - First release the existing chain */
9218 dhd_rxchain_commit(dhd);
9219 }
9220
9221 /* For routers, with HNDCTF, link the packets using PKTSETCLINK, */
9222 /* so that the chain can be handed off to CTF bridge as is. */
9223 if (rxchain->pkt_count == 0) {
9224 /* First packet in chain */
9225 rxchain->pkthead = rxchain->pkttail = pkt;
9226
9227 /* Keep a copy of ptr to ether_da, ether_sa and prio */
9228 rxchain->h_da = ((struct ether_header *)eh)->ether_dhost;
9229 rxchain->h_sa = ((struct ether_header *)eh)->ether_shost;
9230 rxchain->h_prio = prio;
9231 rxchain->ifidx = ifidx;
9232 rxchain->pkt_count++;
9233 } else {
9234 /* Same flow - keep chaining */
9235 PKTSETCLINK(rxchain->pkttail, pkt);
9236 rxchain->pkttail = pkt;
9237 rxchain->pkt_count++;
9238 }
9239
9240 if ((dhd_rx_pkt_chainable(dhd, ifidx)) && (!ETHER_ISMULTI(rxchain->h_da)) &&
9241 ((((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IP)) ||
9242 (((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IPV6)))) {
9243 PKTSETCHAINED(dhd->osh, pkt);
9244 PKTCINCRCNT(rxchain->pkthead);
9245 PKTCADDLEN(rxchain->pkthead, PKTLEN(dhd->osh, pkt));
9246 } else {
9247 dhd_rxchain_commit(dhd);
9248 return;
9249 }
9250
9251 /* If we have hit the max chain length, dispatch the chain and reset */
9252 if (rxchain->pkt_count >= DHD_PKT_CTF_MAX_CHAIN_LEN) {
9253 dhd_rxchain_commit(dhd);
9254 }
9255 }
9256
9257 static void BCMFASTPATH
9258 dhd_rxchain_commit(dhd_pub_t *dhd)
9259 {
9260 dhd_prot_t *prot = dhd->prot;
9261 rxchain_info_t *rxchain = &prot->rxchain;
9262
9263 if (rxchain->pkt_count == 0)
9264 return;
9265
9266 /* Release the packets to dhd_linux */
9267 dhd_bus_rx_frame(dhd->bus, rxchain->pkthead, rxchain->ifidx, rxchain->pkt_count);
9268
9269 /* Reset the chain */
9270 dhd_rxchain_reset(rxchain);
9271 }
9272
9273 #endif /* DHD_RX_CHAINING */
9274
9275 #ifdef IDLE_TX_FLOW_MGMT
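/*
 * Resume an idle tx flow ring: re-attach a msgbuf_ring from the flowring
 * pool, post a MSG_TYPE_FLOW_RING_RESUME request on the control ring and
 * republish the flow ring's write index (via DMA index, IFRM or shared
 * memory, depending on what the bus supports).
 */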
9276 int
9277 dhd_prot_flow_ring_resume(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
9278 {
9279 tx_idle_flowring_resume_request_t *flow_resume_rqst;
9280 msgbuf_ring_t *flow_ring;
9281 dhd_prot_t *prot = dhd->prot;
9282 unsigned long flags;
9283 uint16 alloced = 0;
9284 msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
9285
9286 /* Fetch a pre-initialized msgbuf_ring from the flowring pool */
9287 flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
9288 if (flow_ring == NULL) {
9289 DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
9290 __FUNCTION__, flow_ring_node->flowid));
9291 return BCME_NOMEM;
9292 }
9293
9294 DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
9295
9296 /* Request for ctrl_ring buffer space */
9297 flow_resume_rqst = (tx_idle_flowring_resume_request_t *)
9298 dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);
9299
9300 if (flow_resume_rqst == NULL) {
9301 dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
9302 DHD_ERROR(("%s: Flow resume Req flowid %d - failure ring space\n",
9303 __FUNCTION__, flow_ring_node->flowid));
9304 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
9305 return BCME_NOMEM;
9306 }
9307
9308 flow_ring_node->prot_info = (void *)flow_ring;
9309
9310 /* Common msg buf hdr */
9311 flow_resume_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_RESUME;
9312 flow_resume_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
9313 flow_resume_rqst->msg.request_id = htol32(0); /* TBD */
9314
9315 flow_resume_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
9316 ctrl_ring->seqnum++;
9317
9318 flow_resume_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
9319 DHD_ERROR(("%s Send Flow resume Req flow ID %d\n",
9320 __FUNCTION__, flow_ring_node->flowid));
9321
9322 /* Update the flow_ring's WRITE index */
9323 if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
9324 dhd_prot_dma_indx_set(dhd, flow_ring->wr,
9325 H2D_DMA_INDX_WR_UPD, flow_ring->idx);
9326 } else if (IFRM_ACTIVE(dhd) && (flow_ring->idx >= BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START)) {
9327 dhd_prot_dma_indx_set(dhd, flow_ring->wr,
9328 H2D_IFRM_INDX_WR_UPD,
9329 (flow_ring->idx - BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START));
9330 } else {
9331 dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
9332 sizeof(uint16), RING_WR_UPD, flow_ring->idx);
9333 }
9334
9335 /* update control subn ring's WR index and ring doorbell to dongle */
9336 dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_resume_rqst, 1);
9337
9338 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
9339
9340 return BCME_OK;
9341 } /* dhd_prot_flow_ring_resume */
9342
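/*
 * Post a single MSG_TYPE_FLOW_RING_SUSPEND request carrying a batch of
 * 'count' flow ring ids, asking the dongle to stop polling those idle rings.
 */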
9343 int
9344 dhd_prot_flow_ring_batch_suspend_request(dhd_pub_t *dhd, uint16 *ringid, uint16 count)
9345 {
9346 tx_idle_flowring_suspend_request_t *flow_suspend_rqst;
9347 dhd_prot_t *prot = dhd->prot;
9348 unsigned long flags;
9349 uint16 index;
9350 uint16 alloced = 0;
9351 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
9352
9353 DHD_RING_LOCK(ring->ring_lock, flags);
9354
9355 /* Request for ring buffer space */
9356 flow_suspend_rqst = (tx_idle_flowring_suspend_request_t *)
9357 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
9358
9359 if (flow_suspend_rqst == NULL) {
9360 DHD_RING_UNLOCK(ring->ring_lock, flags);
9361 DHD_ERROR(("%s: Flow suspend Req - failure ring space\n", __FUNCTION__));
9362 return BCME_NOMEM;
9363 }
9364
9365 /* Common msg buf hdr */
9366 flow_suspend_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_SUSPEND;
9367 /* flow_suspend_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; */
9368 flow_suspend_rqst->msg.request_id = htol32(0); /* TBD */
9369
9370 flow_suspend_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
9371 ring->seqnum++;
9372
9373 /* Update flow id info */
9374 for (index = 0; index < count; index++)
9375 {
9376 flow_suspend_rqst->ring_id[index] = ringid[index];
9377 }
9378 flow_suspend_rqst->num = count;
9379
9380 DHD_ERROR(("%s sending batch suspend!! count is %d\n", __FUNCTION__, count));
9381
9382 /* update ring's WR index and ring doorbell to dongle */
9383 dhd_prot_ring_write_complete(dhd, ring, flow_suspend_rqst, 1);
9384
9385 DHD_RING_UNLOCK(ring->ring_lock, flags);
9386
9387 return BCME_OK;
9388 }
9389 #endif /* IDLE_TX_FLOW_MGMT */
9390
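/* Map an extended-trap TLV tag to a printable name for the dumps below. */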
9391 static const char* etd_trap_name(hnd_ext_tag_trap_t tag)
9392 {
9393 switch (tag)
9394 {
9395 case TAG_TRAP_SIGNATURE: return "TAG_TRAP_SIGNATURE";
9396 case TAG_TRAP_STACK: return "TAG_TRAP_STACK";
9397 case TAG_TRAP_MEMORY: return "TAG_TRAP_MEMORY";
9398 case TAG_TRAP_DEEPSLEEP: return "TAG_TRAP_DEEPSLEEP";
9399 case TAG_TRAP_PSM_WD: return "TAG_TRAP_PSM_WD";
9400 case TAG_TRAP_PHY: return "TAG_TRAP_PHY";
9401 case TAG_TRAP_BUS: return "TAG_TRAP_BUS";
9402 case TAG_TRAP_MAC_SUSP: return "TAG_TRAP_MAC_SUSP";
9403 case TAG_TRAP_BACKPLANE: return "TAG_TRAP_BACKPLANE";
9404 case TAG_TRAP_PCIE_Q: return "TAG_TRAP_PCIE_Q";
9405 case TAG_TRAP_WLC_STATE: return "TAG_TRAP_WLC_STATE";
9406 case TAG_TRAP_MAC_WAKE: return "TAG_TRAP_MAC_WAKE";
9407 case TAG_TRAP_HMAP: return "TAG_TRAP_HMAP";
9408 case TAG_TRAP_PHYTXERR_THRESH: return "TAG_TRAP_PHYTXERR_THRESH";
9409 case TAG_TRAP_HC_DATA: return "TAG_TRAP_HC_DATA";
9410 case TAG_TRAP_LOG_DATA: return "TAG_TRAP_LOG_DATA";
9411 case TAG_TRAP_CODE: return "TAG_TRAP_CODE";
9412 case TAG_TRAP_LAST:
9413 default:
9414 return "Unknown";
9415 }
9416 return "Unknown";
9417 }
9418
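/*
 * Decode the extended trap data reported by the dongle: print the original
 * trap word, list the TLV tags present and then pretty-print each supported
 * TLV (trap signature, stack, backplane error, heap state, PCIe queues, WLC
 * state, PHY/PSM/MAC debug registers, PCIe health-check and HMAP violation).
 * With 'raw' set, the buffer is dumped as hex words instead.
 */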
9419 int dhd_prot_dump_extended_trap(dhd_pub_t *dhdp, struct bcmstrbuf *b, bool raw)
9420 {
9421 uint32 i;
9422 uint32 *ext_data;
9423 hnd_ext_trap_hdr_t *hdr;
9424 const bcm_tlv_t *tlv;
9425 const trap_t *tr;
9426 const uint32 *stack;
9427 const hnd_ext_trap_bp_err_t *bpe;
9428 uint32 raw_len;
9429
9430 ext_data = dhdp->extended_trap_data;
9431
9432 /* return if there is no extended trap data */
9433 if (!ext_data || !(dhdp->dongle_trap_data & D2H_DEV_EXT_TRAP_DATA))
9434 {
9435 bcm_bprintf(b, "%d (0x%x)", dhdp->dongle_trap_data, dhdp->dongle_trap_data);
9436 return BCME_OK;
9437 }
9438
9439 bcm_bprintf(b, "Extended trap data\n");
9440
9441 /* First word is original trap_data */
9442 bcm_bprintf(b, "trap_data = 0x%08x\n", *ext_data);
9443 ext_data++;
9444
9445 /* Followed by the extended trap data header */
9446 hdr = (hnd_ext_trap_hdr_t *)ext_data;
9447 bcm_bprintf(b, "version: %d, len: %d\n", hdr->version, hdr->len);
9448
9449 /* Dump a list of all tags found before parsing data */
9450 bcm_bprintf(b, "\nTags Found:\n");
9451 for (i = 0; i < TAG_TRAP_LAST; i++) {
9452 tlv = bcm_parse_tlvs(hdr->data, hdr->len, i);
9453 if (tlv)
9454 bcm_bprintf(b, "Tag: %d (%s), Length: %d\n", i, etd_trap_name(i), tlv->len);
9455 }
9456
9457 if (raw)
9458 {
9459 raw_len = sizeof(hnd_ext_trap_hdr_t) + (hdr->len / 4) + (hdr->len % 4 ? 1 : 0);
9460 for (i = 0; i < raw_len; i++)
9461 {
9462 bcm_bprintf(b, "0x%08x ", ext_data[i]);
9463 if (i % 4 == 3)
9464 bcm_bprintf(b, "\n");
9465 }
9466 return BCME_OK;
9467 }
9468
9469 /* Extract the various supported TLVs from the extended trap data */
9470 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_CODE);
9471 if (tlv)
9472 {
9473 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_CODE), tlv->len);
9474 bcm_bprintf(b, "ETD TYPE: %d\n", tlv->data[0]);
9475 }
9476
9477 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
9478 if (tlv)
9479 {
9480 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_SIGNATURE), tlv->len);
9481 tr = (const trap_t *)tlv->data;
9482
9483 bcm_bprintf(b, "TRAP %x: pc %x, lr %x, sp %x, cpsr %x, spsr %x\n",
9484 tr->type, tr->pc, tr->r14, tr->r13, tr->cpsr, tr->spsr);
9485 bcm_bprintf(b, " r0 %x, r1 %x, r2 %x, r3 %x, r4 %x, r5 %x, r6 %x\n",
9486 tr->r0, tr->r1, tr->r2, tr->r3, tr->r4, tr->r5, tr->r6);
9487 bcm_bprintf(b, " r7 %x, r8 %x, r9 %x, r10 %x, r11 %x, r12 %x\n",
9488 tr->r7, tr->r8, tr->r9, tr->r10, tr->r11, tr->r12);
9489 }
9490
9491 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
9492 if (tlv)
9493 {
9494 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_STACK), tlv->len);
9495 stack = (const uint32 *)tlv->data;
9496 for (i = 0; i < (uint32)(tlv->len / 4); i++)
9497 {
9498 bcm_bprintf(b, " 0x%08x\n", *stack);
9499 stack++;
9500 }
9501 }
9502
9503 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BACKPLANE);
9504 if (tlv)
9505 {
9506 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BACKPLANE), tlv->len);
9507 bpe = (const hnd_ext_trap_bp_err_t *)tlv->data;
9508 bcm_bprintf(b, " error: %x\n", bpe->error);
9509 bcm_bprintf(b, " coreid: %x\n", bpe->coreid);
9510 bcm_bprintf(b, " baseaddr: %x\n", bpe->baseaddr);
9511 bcm_bprintf(b, " ioctrl: %x\n", bpe->ioctrl);
9512 bcm_bprintf(b, " iostatus: %x\n", bpe->iostatus);
9513 bcm_bprintf(b, " resetctrl: %x\n", bpe->resetctrl);
9514 bcm_bprintf(b, " resetstatus: %x\n", bpe->resetstatus);
9515 bcm_bprintf(b, " errlogctrl: %x\n", bpe->errlogctrl);
9516 bcm_bprintf(b, " errlogdone: %x\n", bpe->errlogdone);
9517 bcm_bprintf(b, " errlogstatus: %x\n", bpe->errlogstatus);
9518 bcm_bprintf(b, " errlogaddrlo: %x\n", bpe->errlogaddrlo);
9519 bcm_bprintf(b, " errlogaddrhi: %x\n", bpe->errlogaddrhi);
9520 bcm_bprintf(b, " errlogid: %x\n", bpe->errlogid);
9521 bcm_bprintf(b, " errloguser: %x\n", bpe->errloguser);
9522 bcm_bprintf(b, " errlogflags: %x\n", bpe->errlogflags);
9523 }
9524
9525 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MEMORY);
9526 if (tlv)
9527 {
9528 const hnd_ext_trap_heap_err_t* hme;
9529
9530 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MEMORY), tlv->len);
9531 hme = (const hnd_ext_trap_heap_err_t *)tlv->data;
9532 bcm_bprintf(b, " arena total: %d\n", hme->arena_total);
9533 bcm_bprintf(b, " heap free: %d\n", hme->heap_free);
9534 bcm_bprintf(b, " heap in use: %d\n", hme->heap_inuse);
9535 bcm_bprintf(b, " mf count: %d\n", hme->mf_count);
9536 bcm_bprintf(b, " stack LWM: %x\n", hme->stack_lwm);
9537
9538 bcm_bprintf(b, " Histogram:\n");
9539 for (i = 0; i < (HEAP_HISTOGRAM_DUMP_LEN * 2); i += 2) {
9540 if (hme->heap_histogm[i] == 0xfffe)
9541 bcm_bprintf(b, " Others\t%d\t?\n", hme->heap_histogm[i + 1]);
9542 else if (hme->heap_histogm[i] == 0xffff)
9543 bcm_bprintf(b, " >= 256K\t%d\t?\n", hme->heap_histogm[i + 1]);
9544 else
9545 bcm_bprintf(b, " %d\t%d\t%d\n", hme->heap_histogm[i] << 2,
9546 hme->heap_histogm[i + 1], (hme->heap_histogm[i] << 2)
9547 * hme->heap_histogm[i + 1]);
9548 }
9549
9550 bcm_bprintf(b, " Max free block: %d\n", hme->max_sz_free_blk[0] << 2);
9551 for (i = 1; i < HEAP_MAX_SZ_BLKS_LEN; i++) {
9552 bcm_bprintf(b, " Next lgst free block: %d\n", hme->max_sz_free_blk[i] << 2);
9553 }
9554 }
9555
9556 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PCIE_Q);
9557 if (tlv)
9558 {
9559 const hnd_ext_trap_pcie_mem_err_t* pqme;
9560
9561 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PCIE_Q), tlv->len);
9562 pqme = (const hnd_ext_trap_pcie_mem_err_t *)tlv->data;
9563 bcm_bprintf(b, " d2h queue len: %x\n", pqme->d2h_queue_len);
9564 bcm_bprintf(b, " d2h req queue len: %x\n", pqme->d2h_req_queue_len);
9565 }
9566
9567 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_WLC_STATE);
9568 if (tlv)
9569 {
9570 const hnd_ext_trap_wlc_mem_err_t* wsme;
9571
9572 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_WLC_STATE), tlv->len);
9573 wsme = (const hnd_ext_trap_wlc_mem_err_t *)tlv->data;
9574 bcm_bprintf(b, " instance: %d\n", wsme->instance);
9575 bcm_bprintf(b, " associated: %d\n", wsme->associated);
9576 bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt);
9577 bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt);
9578 bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]);
9579 bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]);
9580 bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[2]);
9581 bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[3]);
9582
9583 if (tlv->len >= (sizeof(*wsme) * 2)) {
9584 wsme++;
9585 bcm_bprintf(b, "\n instance: %d\n", wsme->instance);
9586 bcm_bprintf(b, " associated: %d\n", wsme->associated);
9587 bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt);
9588 bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt);
9589 bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]);
9590 bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]);
9591 bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[2]);
9592 bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[3]);
9593 }
9594 }
9595
9596 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PHY);
9597 if (tlv)
9598 {
9599 const hnd_ext_trap_phydbg_t* phydbg;
9600 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PHY), tlv->len);
9601 phydbg = (const hnd_ext_trap_phydbg_t *)tlv->data;
9602 bcm_bprintf(b, " err: 0x%x\n", phydbg->err);
9603 bcm_bprintf(b, " RxFeStatus: 0x%x\n", phydbg->RxFeStatus);
9604 bcm_bprintf(b, " TxFIFOStatus0: 0x%x\n", phydbg->TxFIFOStatus0);
9605 bcm_bprintf(b, " TxFIFOStatus1: 0x%x\n", phydbg->TxFIFOStatus1);
9606 bcm_bprintf(b, " RfseqMode: 0x%x\n", phydbg->RfseqMode);
9607 bcm_bprintf(b, " RfseqStatus0: 0x%x\n", phydbg->RfseqStatus0);
9608 bcm_bprintf(b, " RfseqStatus1: 0x%x\n", phydbg->RfseqStatus1);
9609 bcm_bprintf(b, " RfseqStatus_Ocl: 0x%x\n", phydbg->RfseqStatus_Ocl);
9610 bcm_bprintf(b, " RfseqStatus_Ocl1: 0x%x\n", phydbg->RfseqStatus_Ocl1);
9611 bcm_bprintf(b, " OCLControl1: 0x%x\n", phydbg->OCLControl1);
9612 bcm_bprintf(b, " TxError: 0x%x\n", phydbg->TxError);
9613 bcm_bprintf(b, " bphyTxError: 0x%x\n", phydbg->bphyTxError);
9614 bcm_bprintf(b, " TxCCKError: 0x%x\n", phydbg->TxCCKError);
9615 bcm_bprintf(b, " TxCtrlWrd0: 0x%x\n", phydbg->TxCtrlWrd0);
9616 bcm_bprintf(b, " TxCtrlWrd1: 0x%x\n", phydbg->TxCtrlWrd1);
9617 bcm_bprintf(b, " TxCtrlWrd2: 0x%x\n", phydbg->TxCtrlWrd2);
9618 bcm_bprintf(b, " TxLsig0: 0x%x\n", phydbg->TxLsig0);
9619 bcm_bprintf(b, " TxLsig1: 0x%x\n", phydbg->TxLsig1);
9620 bcm_bprintf(b, " TxVhtSigA10: 0x%x\n", phydbg->TxVhtSigA10);
9621 bcm_bprintf(b, " TxVhtSigA11: 0x%x\n", phydbg->TxVhtSigA11);
9622 bcm_bprintf(b, " TxVhtSigA20: 0x%x\n", phydbg->TxVhtSigA20);
9623 bcm_bprintf(b, " TxVhtSigA21: 0x%x\n", phydbg->TxVhtSigA21);
9624 bcm_bprintf(b, " txPktLength: 0x%x\n", phydbg->txPktLength);
9625 bcm_bprintf(b, " txPsdulengthCtr: 0x%x\n", phydbg->txPsdulengthCtr);
9626 bcm_bprintf(b, " gpioClkControl: 0x%x\n", phydbg->gpioClkControl);
9627 bcm_bprintf(b, " gpioSel: 0x%x\n", phydbg->gpioSel);
9628 bcm_bprintf(b, " pktprocdebug: 0x%x\n", phydbg->pktprocdebug);
9629 for (i = 0; i < 3; i++)
9630 bcm_bprintf(b, " gpioOut[%d]: 0x%x\n", i, phydbg->gpioOut[i]);
9631 }
9632
9633 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PSM_WD);
9634 if (tlv)
9635 {
9636 const hnd_ext_trap_psmwd_t* psmwd;
9637 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PSM_WD), tlv->len);
9638 psmwd = (const hnd_ext_trap_psmwd_t *)tlv;
9639 bcm_bprintf(b, " version: 0x%x\n", psmwd->version);
9640 bcm_bprintf(b, " maccontrol: 0x%x\n", psmwd->i32_maccontrol);
9641 bcm_bprintf(b, " maccommand: 0x%x\n", psmwd->i32_maccommand);
9642 bcm_bprintf(b, " macintstatus: 0x%x\n", psmwd->i32_macintstatus);
9643 bcm_bprintf(b, " phydebug: 0x%x\n", psmwd->i32_phydebug);
9644 bcm_bprintf(b, " clk_ctl_st: 0x%x\n", psmwd->i32_clk_ctl_st);
9645 for (i = 0; i < 3; i++)
9646 bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, psmwd->i32_psmdebug[i]);
9647 bcm_bprintf(b, " gated clock en: 0x%x\n", psmwd->i16_0x1a8);
9648 bcm_bprintf(b, " Rcv Fifo Ctrl: 0x%x\n", psmwd->i16_0x406);
9649 bcm_bprintf(b, " Rx ctrl 1: 0x%x\n", psmwd->i16_0x408);
9650 bcm_bprintf(b, " Rxe Status 1: 0x%x\n", psmwd->i16_0x41a);
9651 bcm_bprintf(b, " Rxe Status 2: 0x%x\n", psmwd->i16_0x41c);
9652 bcm_bprintf(b, " rcv wrd count 0: 0x%x\n", psmwd->i16_0x424);
9653 bcm_bprintf(b, " rcv wrd count 1: 0x%x\n", psmwd->i16_0x426);
9654 bcm_bprintf(b, " RCV_LFIFO_STS: 0x%x\n", psmwd->i16_0x456);
9655 bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", psmwd->i16_0x480);
9656 bcm_bprintf(b, " PSM BRC: 0x%x\n", psmwd->i16_0x490);
9657 bcm_bprintf(b, " TXE CTRL: 0x%x\n", psmwd->i16_0x500);
9658 bcm_bprintf(b, " TXE Status: 0x%x\n", psmwd->i16_0x50e);
9659 bcm_bprintf(b, " TXE_xmtdmabusy: 0x%x\n", psmwd->i16_0x55e);
9660 bcm_bprintf(b, " TXE_XMTfifosuspflush: 0x%x\n", psmwd->i16_0x566);
9661 bcm_bprintf(b, " IFS Stat: 0x%x\n", psmwd->i16_0x690);
9662 bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", psmwd->i16_0x692);
9663 bcm_bprintf(b, " IFS_TX_DUR: 0x%x\n", psmwd->i16_0x694);
9664 bcm_bprintf(b, " SLow_CTL: 0x%x\n", psmwd->i16_0x6a0);
9665 bcm_bprintf(b, " TXE_AQM fifo Ready: 0x%x\n", psmwd->i16_0x838);
9666 bcm_bprintf(b, " Dagg ctrl: 0x%x\n", psmwd->i16_0x8c0);
9667 bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", psmwd->shm_prewds_cnt);
9668 bcm_bprintf(b, " shm_txtplufl_cnt: 0x%x\n", psmwd->shm_txtplufl_cnt);
9669 bcm_bprintf(b, " shm_txphyerr_cnt: 0x%x\n", psmwd->shm_txphyerr_cnt);
9670 }
9671
9672 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MAC_SUSP);
9673 if (tlv)
9674 {
9675 const hnd_ext_trap_macsusp_t* macsusp;
9676 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_SUSP), tlv->len);
9677 macsusp = (const hnd_ext_trap_macsusp_t *)tlv;
9678 bcm_bprintf(b, " version: %d\n", macsusp->version);
9679 bcm_bprintf(b, " trap_reason: %d\n", macsusp->trap_reason);
9680 bcm_bprintf(b, " maccontrol: 0x%x\n", macsusp->i32_maccontrol);
9681 bcm_bprintf(b, " maccommand: 0x%x\n", macsusp->i32_maccommand);
9682 bcm_bprintf(b, " macintstatus: 0x%x\n", macsusp->i32_macintstatus);
9683 for (i = 0; i < 4; i++)
9684 bcm_bprintf(b, " phydebug[%d]: 0x%x\n", i, macsusp->i32_phydebug[i]);
9685 for (i = 0; i < 8; i++)
9686 bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, macsusp->i32_psmdebug[i]);
9687 bcm_bprintf(b, " Rxe Status_1: 0x%x\n", macsusp->i16_0x41a);
9688 bcm_bprintf(b, " Rxe Status_2: 0x%x\n", macsusp->i16_0x41c);
9689 bcm_bprintf(b, " PSM BRC: 0x%x\n", macsusp->i16_0x490);
9690 bcm_bprintf(b, " TXE Status: 0x%x\n", macsusp->i16_0x50e);
9691 bcm_bprintf(b, " TXE xmtdmabusy: 0x%x\n", macsusp->i16_0x55e);
9692 bcm_bprintf(b, " TXE XMTfifosuspflush: 0x%x\n", macsusp->i16_0x566);
9693 bcm_bprintf(b, " IFS Stat: 0x%x\n", macsusp->i16_0x690);
9694 bcm_bprintf(b, " IFS MEDBUSY CTR: 0x%x\n", macsusp->i16_0x692);
9695 bcm_bprintf(b, " IFS TX DUR: 0x%x\n", macsusp->i16_0x694);
9696 bcm_bprintf(b, " WEP CTL: 0x%x\n", macsusp->i16_0x7c0);
9697 bcm_bprintf(b, " TXE AQM fifo Ready: 0x%x\n", macsusp->i16_0x838);
9698 bcm_bprintf(b, " MHP status: 0x%x\n", macsusp->i16_0x880);
9699 bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", macsusp->shm_prewds_cnt);
9700 bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macsusp->shm_ucode_dbgst);
9701 }
9702
9703 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MAC_WAKE);
9704 if (tlv)
9705 {
9706 const hnd_ext_trap_macenab_t* macwake;
9707 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_WAKE), tlv->len);
9708 macwake = (const hnd_ext_trap_macenab_t *)tlv;
9709 bcm_bprintf(b, " version: 0x%x\n", macwake->version);
9710 bcm_bprintf(b, " trap_reason: 0x%x\n", macwake->trap_reason);
9711 bcm_bprintf(b, " maccontrol: 0x%x\n", macwake->i32_maccontrol);
9712 bcm_bprintf(b, " maccommand: 0x%x\n", macwake->i32_maccommand);
9713 bcm_bprintf(b, " macintstatus: 0x%x\n", macwake->i32_macintstatus);
9714 for (i = 0; i < 8; i++)
9715 bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, macwake->i32_psmdebug[i]);
9716 bcm_bprintf(b, " clk_ctl_st: 0x%x\n", macwake->i32_clk_ctl_st);
9717 bcm_bprintf(b, " powerctl: 0x%x\n", macwake->i32_powerctl);
9718 bcm_bprintf(b, " gated clock en: 0x%x\n", macwake->i16_0x1a8);
9719 bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", macwake->i16_0x480);
9720 bcm_bprintf(b, " PSM BRC: 0x%x\n", macwake->i16_0x490);
9721 bcm_bprintf(b, " TSF CTL: 0x%x\n", macwake->i16_0x600);
9722 bcm_bprintf(b, " IFS Stat: 0x%x\n", macwake->i16_0x690);
9723 bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", macwake->i16_0x692);
9724 bcm_bprintf(b, " Slow_CTL: 0x%x\n", macwake->i16_0x6a0);
9725 bcm_bprintf(b, " Slow_FRAC: 0x%x\n", macwake->i16_0x6a6);
9726 bcm_bprintf(b, " fast power up delay: 0x%x\n", macwake->i16_0x6a8);
9727 bcm_bprintf(b, " Slow_PER: 0x%x\n", macwake->i16_0x6aa);
9728 bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macwake->shm_ucode_dbgst);
9729 }
9730
9731 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BUS);
9732 if (tlv)
9733 {
9734 const bcm_dngl_pcie_hc_t* hc;
9735 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BUS), tlv->len);
9736 hc = (const bcm_dngl_pcie_hc_t *)tlv->data;
9737 bcm_bprintf(b, " version: 0x%x\n", hc->version);
9738 bcm_bprintf(b, " reserved: 0x%x\n", hc->reserved);
9739 bcm_bprintf(b, " pcie_err_ind_type: 0x%x\n", hc->pcie_err_ind_type);
9740 bcm_bprintf(b, " pcie_flag: 0x%x\n", hc->pcie_flag);
9741 bcm_bprintf(b, " pcie_control_reg: 0x%x\n", hc->pcie_control_reg);
9742 for (i = 0; i < HC_PCIEDEV_CONFIG_REGLIST_MAX; i++)
9743 bcm_bprintf(b, " pcie_config_regs[%d]: 0x%x\n", i, hc->pcie_config_regs[i]);
9744 }
9745
9746 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_HMAP);
9747 if (tlv)
9748 {
9749 const pcie_hmapviolation_t* hmap;
9750 hmap = (const pcie_hmapviolation_t *)tlv->data;
9751 bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_HMAP), tlv->len);
9752 bcm_bprintf(b, " HMAP Vio Addr Low: 0x%x\n", hmap->hmap_violationaddr_lo);
9753 bcm_bprintf(b, " HMAP Vio Addr Hi: 0x%x\n", hmap->hmap_violationaddr_hi);
9754 bcm_bprintf(b, " HMAP Vio Info: 0x%x\n", hmap->hmap_violation_info);
9755 }
9756
9757 return BCME_OK;
9758 }
9759
9760 #ifdef BCMPCIE
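/*
 * Send a host timestamp request to the dongle: copy the caller's TLVs into
 * the dedicated hostts_req_buf, describe that buffer in a
 * MSG_TYPE_HOSTTIMSTAMP work item on the control ring and ring the doorbell.
 * Only one request may be outstanding at a time.
 */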
9761 int
9762 dhd_prot_send_host_timestamp(dhd_pub_t *dhdp, uchar *tlvs, uint16 tlv_len,
9763 uint16 seqnum, uint16 xt_id)
9764 {
9765 dhd_prot_t *prot = dhdp->prot;
9766 host_timestamp_msg_t *ts_req;
9767 unsigned long flags;
9768 uint16 alloced = 0;
9769 uchar *ts_tlv_buf;
9770 msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
9771
9772 if ((tlvs == NULL) || (tlv_len == 0)) {
9773 DHD_ERROR(("%s: argument error tlv: %p, tlv_len %d\n",
9774 __FUNCTION__, tlvs, tlv_len));
9775 return -1;
9776 }
9777
9778 DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
9779
9780 /* if Host TS req already pending go away */
9781 if (prot->hostts_req_buf_inuse == TRUE) {
9782 DHD_ERROR(("one host TS request already pending at device\n"));
9783 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
9784 return -1;
9785 }
9786
9787 /* Request for cbuf space */
9788 ts_req = (host_timestamp_msg_t*)dhd_prot_alloc_ring_space(dhdp, ctrl_ring,
9789 DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced, FALSE);
9790 if (ts_req == NULL) {
9791 DHD_ERROR(("couldn't allocate space on msgring to send host TS request\n"));
9792 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
9793 return -1;
9794 }
9795
9796 /* Common msg buf hdr */
9797 ts_req->msg.msg_type = MSG_TYPE_HOSTTIMSTAMP;
9798 ts_req->msg.if_id = 0;
9799 ts_req->msg.flags = ctrl_ring->current_phase;
9800 ts_req->msg.request_id = DHD_H2D_HOSTTS_REQ_PKTID;
9801
9802 ts_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
9803 ctrl_ring->seqnum++;
9804
9805 ts_req->xt_id = xt_id;
9806 ts_req->seqnum = seqnum;
9807 /* populate TS req buffer info */
9808 ts_req->input_data_len = htol16(tlv_len);
9809 ts_req->host_buf_addr.high = htol32(PHYSADDRHI(prot->hostts_req_buf.pa));
9810 ts_req->host_buf_addr.low = htol32(PHYSADDRLO(prot->hostts_req_buf.pa));
9811 	/* copy ioctl payload */
9812 ts_tlv_buf = (void *) prot->hostts_req_buf.va;
9813 prot->hostts_req_buf_inuse = TRUE;
9814 memcpy(ts_tlv_buf, tlvs, tlv_len);
9815
9816 OSL_CACHE_FLUSH((void *) prot->hostts_req_buf.va, tlv_len);
9817
9818 if (ISALIGNED(ts_tlv_buf, DMA_ALIGN_LEN) == FALSE) {
9819 		DHD_ERROR(("host TS req buffer address is unaligned\n"));
9820 }
9821
9822 	DHD_CTL(("submitted Host TS request: request_id %d, data_len %d, xt_id %d, seq %d\n",
9823 ts_req->msg.request_id, ts_req->input_data_len,
9824 ts_req->xt_id, ts_req->seqnum));
9825
9826 /* upd wrt ptr and raise interrupt */
9827 dhd_prot_ring_write_complete(dhdp, ctrl_ring, ts_req,
9828 DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
9829
9830 DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
9831
9832 return 0;
9833 } /* dhd_prot_send_host_timestamp */
9834
9835 bool
9836 dhd_prot_data_path_tx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set)
9837 {
9838 if (set)
9839 dhd->prot->tx_ts_log_enabled = enable;
9840
9841 return dhd->prot->tx_ts_log_enabled;
9842 }
9843
9844 bool
9845 dhd_prot_data_path_rx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set)
9846 {
9847 if (set)
9848 dhd->prot->rx_ts_log_enabled = enable;
9849
9850 return dhd->prot->rx_ts_log_enabled;
9851 }
9852
9853 bool
9854 dhd_prot_pkt_noretry(dhd_pub_t *dhd, bool enable, bool set)
9855 {
9856 if (set)
9857 dhd->prot->no_retry = enable;
9858
9859 return dhd->prot->no_retry;
9860 }
9861
9862 bool
9863 dhd_prot_pkt_noaggr(dhd_pub_t *dhd, bool enable, bool set)
9864 {
9865 if (set)
9866 dhd->prot->no_aggr = enable;
9867
9868 return dhd->prot->no_aggr;
9869 }
9870
9871 bool
9872 dhd_prot_pkt_fixed_rate(dhd_pub_t *dhd, bool enable, bool set)
9873 {
9874 if (set)
9875 dhd->prot->fixed_rate = enable;
9876
9877 return dhd->prot->fixed_rate;
9878 }
9879 #endif /* BCMPCIE */
9880
9881 void
9882 dhd_prot_dma_indx_free(dhd_pub_t *dhd)
9883 {
9884 dhd_prot_t *prot = dhd->prot;
9885
9886 dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
9887 dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
9888 }
9889
9890 void
9891 dhd_msgbuf_delay_post_ts_bufs(dhd_pub_t *dhd)
9892 {
9893 if (dhd->prot->max_tsbufpost > 0)
9894 dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
9895 }
9896
9897 static void BCMFASTPATH
9898 dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void* buf)
9899 {
9900 	DHD_ERROR(("Timesync feature not compiled in but got FW TS message\n"));
9901
9902 }
9903
9904 uint16
9905 dhd_prot_get_ioctl_trans_id(dhd_pub_t *dhdp)
9906 {
9907 return dhdp->prot->ioctl_trans_id;
9908 }