/**
 * @file definition of host message ring functionality
 * Provides type definitions and function prototypes used to link the
 * DHD OS, bus, and protocol modules.
 *
 * Copyright (C) 1999-2019, Broadcom.
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: dhd_msgbuf.c 796673 2018-12-26 08:34:38Z $
 */
#include <bcmmsgbuf.h>
#include <bcmendian.h>

#include <dngl_stats.h>
#include <dhd_proto.h>
#include <dhd_debug.h>
#include <dhd_flowring.h>

#include <pcie_core.h>

#include <linux/cpu.h>

#define DHD_LB_WORKQ_SZ		(8192)
#define DHD_LB_WORKQ_SYNC	(16)
#define DHD_LB_WORK_SCHED	(DHD_LB_WORKQ_SYNC * 2)

#include <hnd_debug.h>
#include <hnd_armtrap.h>
#include <dnglevent.h>

#ifdef DHD_PKT_LOGGING
#include <dhd_pktlog.h>
#endif /* DHD_PKT_LOGGING */
extern char dhd_version[];
extern char fw_version[];
/**
 * Host configures a soft doorbell for d2h rings, by specifying a 32bit host
 * address where a value must be written. Host may also request interrupt
 * coalescing on this soft doorbell.
 * Use Case: Hosts with network processors may register with the dongle the
 * network processor's thread wakeup register and a value corresponding to the
 * core/thread context. Dongle will issue a write transaction <address,value>
 * to the PCIE RC which will need to be routed to the mapped register space,
 * by the host.
 */
/* #define DHD_D2H_SOFT_DOORBELL_SUPPORT */
/* Dependency Check */
#if defined(IOCTLRESP_USE_CONSTMEM) && defined(DHD_USE_STATIC_CTRLBUF)
#error "DHD_USE_STATIC_CTRLBUF is NOT working with DHD_USE_OSLPKT_FOR_RESPBUF"
#endif /* IOCTLRESP_USE_CONSTMEM && DHD_USE_STATIC_CTRLBUF */
#define RETRIES 2	/* # of retries to retrieve matching ioctl response */

#define DEFAULT_RX_BUFFERS_TO_POST	256
#define RXBUFPOST_THRESHOLD		32
#define RX_BUF_BURST			32 /* Rx buffers for MSDU Data */

#define DHD_STOP_QUEUE_THRESHOLD	200
#define DHD_START_QUEUE_THRESHOLD	100

#define RX_DMA_OFFSET		8 /* Mem2mem DMA inserts an extra 8 bytes */
#define IOCT_RETBUF_SIZE	(RX_DMA_OFFSET + WLC_IOCTL_MAXLEN)
/* flags for ioctl pending status */
#define MSGBUF_IOCTL_ACK_PENDING	(1<<0)
#define MSGBUF_IOCTL_RESP_PENDING	(1<<1)

#define DHD_IOCTL_REQ_PKTBUFSZ		2048
#define MSGBUF_IOCTL_MAX_RQSTLEN	(DHD_IOCTL_REQ_PKTBUFSZ - H2DRING_CTRL_SUB_ITEMSIZE)

#define DMA_ALIGN_LEN		4

#define DMA_D2H_SCRATCH_BUF_LEN	8
#define DMA_XFER_LEN_LIMIT	0x400000

#ifdef BCM_HOST_BUF
#ifndef DMA_HOST_BUFFER_LEN
#define DMA_HOST_BUFFER_LEN	0x200000
#endif
#endif /* BCM_HOST_BUF */

#define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ	8192

#define DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D	1
#define DHD_FLOWRING_MAX_EVENTBUF_POST		32
#define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST	8
#define DHD_H2D_INFORING_MAX_BUF_POST		32
#define DHD_MAX_TSBUF_POST			8

#define DHD_PROT_FUNCS	43

/* Length of buffer in host for bus throughput measurement */
#define DHD_BUS_TPUT_BUF_LEN	2048

#define TXP_FLUSH_NITEMS

/* optimization to write "n" tx items at a time to ring */
#define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT	48

#define RING_NAME_MAX_LENGTH		24
#define CTRLSUB_HOSTTS_MEESAGE_SIZE	1024
/* Giving room before ioctl_trans_id rolls over. */
#define BUFFER_BEFORE_ROLLOVER		300

/* 512K memory + 32K registers */
#define SNAPSHOT_UPLOAD_BUF_SIZE	((512 + 32) * 1024)
struct msgbuf_ring; /* ring context for common and flow rings */
/**
 * PCIE D2H DMA Complete Sync Modes
 *
 * Firmware may interrupt the host prior to the D2H Mem2Mem DMA completing into
 * host system memory. A WAR using one of the following approaches is needed:
 * 1. Dongle places a modulo-253 seqnum in the last word of each D2H message.
 * 2. XOR Checksum, with epoch# in each work item. Dongle builds an XOR checksum
 *    and writes it in the last word of each work item. Each work item has a
 *    seqnum number = sequence num % 253.
 * 3. Read Barrier: Dongle does a host memory read access prior to posting an
 *    interrupt, ensuring that D2H data transfer indeed completed.
 * 4. Dongle DMA's all indices after producing items in the D2H ring, flushing
 *    ring contents before the indices.
 *
 * Host does not sync for DMA to complete with option #3 or #4, and a noop sync
 * callback (see dhd_prot_d2h_sync_none) may be bound.
 *
 * Dongle advertises the host side sync mechanism requirements.
 */
#define PCIE_D2H_SYNC_WAIT_TRIES	(512U)
#define PCIE_D2H_SYNC_NUM_OF_STEPS	(5U)
#define PCIE_D2H_SYNC_DELAY		(100UL)	/* in terms of usecs */
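/*
 * Illustrative worst-case bound (not from the original source, assuming the
 * stepper loops in dhd_prot_d2h_sync_seqnum()/dhd_prot_d2h_sync_xorcsum()
 * below): a message may be polled for up to
 *   PCIE_D2H_SYNC_WAIT_TRIES * PCIE_D2H_SYNC_DELAY * (1+2+3+4+5)
 *   = 512 * 100us * 15 = 768 ms
 * before the livelock handler runs, ignoring the per-iteration cost of the
 * cache invalidates and cpu-relax calls themselves.
 */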
/**
 * Custom callback attached based upon D2H DMA Sync mode advertised by dongle.
 *
 * On success: return cmn_msg_hdr_t::msg_type
 * On failure: return 0 (invalid msg_type)
 */
typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
	volatile cmn_msg_hdr_t *msg, int msglen);
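/*
 * Illustrative usage of d2h_sync_cb_t (a sketch, not code from this driver):
 * before consuming a work item from a D2H ring, the bound callback validates
 * DMA completion and yields the message type, with MSG_TYPE_INVALID (0)
 * signalling that the item must be skipped.
 */
#if 0 /* usage sketch only */
static uint8
d2h_sync_and_get_msgtype(dhd_pub_t *dhd, struct msgbuf_ring *ring,
	volatile cmn_msg_hdr_t *msg, int msglen)
{
	/* d2h_sync_cb is bound in dhd_prot_d2h_sync_init() below */
	return dhd->prot->d2h_sync_cb(dhd, ring, msg, msglen);
}
#endif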
/**
 * +----------------------------------------------------------------------------
 *
 * RingIds and FlowId are not equivalent as ringids include D2H rings, whereas
 * flowids apply only to H2D TxPost rings.
 *
 * Dongle advertises the max H2D rings, as max_sub_queues = 'N' which includes
 * the H2D common rings as well as the (N-BCMPCIE_H2D_COMMON_MSGRINGS) flowrings.
 *
 * Here is a sample mapping (based on PCIE Full Dongle Rev5) where,
 * BCMPCIE_H2D_COMMON_MSGRINGS = 2, i.e. 2 H2D common rings,
 * BCMPCIE_COMMON_MSGRINGS = 5, i.e. including 3 D2H common rings.
 *
 * H2D Control Submit    RingId = 0          FlowId = 0 reserved never allocated
 * H2D RxPost Submit     RingId = 1          FlowId = 1 reserved never allocated
 *
 * D2H Control Complete  RingId = 2
 * D2H Transmit Complete RingId = 3
 * D2H Receive Complete  RingId = 4
 *
 * H2D TxPost FLOWRING   RingId = 5          FlowId = 2      (1st flowring)
 * H2D TxPost FLOWRING   RingId = 6          FlowId = 3      (2nd flowring)
 * H2D TxPost FLOWRING   RingId = 5 + (N-1)  FlowId = 2 + (N-1) (Nth flowring)
 *
 * When TxPost FlowId(s) are allocated, the FlowIds [0..FLOWID_RESERVED) are
 * unused, where FLOWID_RESERVED is BCMPCIE_H2D_COMMON_MSGRINGS.
 *
 * Example: when a system supports 4 bc/mc and 128 uc flowrings, with
 * BCMPCIE_H2D_COMMON_MSGRINGS = 2 and BCMPCIE_COMMON_MSGRINGS = 5, the
 * FlowId values would be in the range [2..133] and the corresponding
 * RingId values would be in the range [5..136].
 *
 * The flowid allocator may choose to allocate flowids:
 *   bc/mc (per virtual interface) in one consecutive range [2..(2+VIFS))
 *   X# of uc flowids in consecutive ranges (per station Id), where X is the
 *   packet's access category (e.g. 4 uc flowids per station).
 *
 * When the DMA indices array feature is used, RingId=5, corresponding to the 0th
 * FLOWRING, will actually use the FlowId as index into the H2D DMA index array,
 * since the FlowId truly represents the index in the H2D DMA indices array.
 *
 * Likewise, in the D2H direction, the RingId - BCMPCIE_H2D_COMMON_MSGRINGS,
 * will represent the index in the D2H DMA indices array.
 *
 * +----------------------------------------------------------------------------
 */
/* First TxPost Flowring Id */
#define DHD_FLOWRING_START_FLOWID	BCMPCIE_H2D_COMMON_MSGRINGS

/* Determine whether a ringid belongs to a TxPost flowring */
#define DHD_IS_FLOWRING(ringid, max_flow_rings) \
	((ringid) >= BCMPCIE_COMMON_MSGRINGS && \
	(ringid) < ((max_flow_rings) + BCMPCIE_COMMON_MSGRINGS))

/* Convert a H2D TxPost FlowId to a MsgBuf RingId */
#define DHD_FLOWID_TO_RINGID(flowid) \
	(BCMPCIE_COMMON_MSGRINGS + ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS))

/* Convert a MsgBuf RingId to a H2D TxPost FlowId */
#define DHD_RINGID_TO_FLOWID(ringid) \
	(BCMPCIE_H2D_COMMON_MSGRINGS + ((ringid) - BCMPCIE_COMMON_MSGRINGS))

/* Convert a H2D MsgBuf RingId to an offset index into the H2D DMA indices array.
 * This may be used for the H2D DMA WR index array or H2D DMA RD index array or
 * any array of H2D rings.
 */
#define DHD_H2D_RING_OFFSET(ringid) \
	(((ringid) >= BCMPCIE_COMMON_MSGRINGS) ? DHD_RINGID_TO_FLOWID(ringid) : (ringid))

/* Convert a H2D MsgBuf Flowring Id to an offset index into the H2D DMA indices array.
 * This may be used for IFRM.
 */
#define DHD_H2D_FRM_FLOW_RING_OFFSET(ringid) \
	((ringid) - BCMPCIE_COMMON_MSGRINGS)

/* Convert a D2H MsgBuf RingId to an offset index into the D2H DMA indices array.
 * This may be used for the D2H DMA WR index array or D2H DMA RD index array or
 * any array of D2H rings.
 * The d2h debug ring is located at the end, i.e. after all the tx flow rings
 * and the h2d debug ring. max_h2d_rings: total number of h2d rings.
 */
#define DHD_D2H_RING_OFFSET(ringid, max_h2d_rings) \
	((ringid) > (max_h2d_rings) ? \
		((ringid) - max_h2d_rings) : \
		((ringid) - BCMPCIE_H2D_COMMON_MSGRINGS))

/* Convert a D2H DMA Indices Offset to a RingId */
#define DHD_D2H_RINGID(offset) \
	((offset) + BCMPCIE_H2D_COMMON_MSGRINGS)
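/*
 * Worked example (illustrative, using the sample mapping above with
 * BCMPCIE_H2D_COMMON_MSGRINGS == 2 and BCMPCIE_COMMON_MSGRINGS == 5):
 */
#if 0 /* worked example only */
ASSERT(DHD_FLOWID_TO_RINGID(2) == 5);	/* 1st TxPost flowring */
ASSERT(DHD_RINGID_TO_FLOWID(6) == 3);	/* 2nd TxPost flowring */
ASSERT(DHD_H2D_RING_OFFSET(1) == 1);	/* common ring: offset == ringid */
ASSERT(DHD_H2D_RING_OFFSET(5) == 2);	/* flowring: offset == its FlowId */
#endif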
#define DHD_DMAH_NULL	((void*)NULL)
/*
 * Pad a DMA-able buffer by an additional cacheline. If the end of the DMA-able
 * buffer does not occupy the entire cacheline, and another object is placed
 * following the DMA-able buffer, data corruption may occur if the DMA-able
 * buffer is used for DMAing into (e.g. D2H direction), when HW cache coherency
 * is not available.
 */
#if defined(L1_CACHE_BYTES)
#define DHD_DMA_PAD	(L1_CACHE_BYTES)
#else
#define DHD_DMA_PAD	(128)
#endif
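/*
 * Worked example (illustrative): with L1_CACHE_BYTES == 64, a request for a
 * 2000 byte D2H buffer is allocated as 2064 bytes, mirroring the
 *   dma_pad = (buf_len % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
 * computation in dhd_dma_buf_alloc() below, so that no other object can share
 * the buffer's last cacheline.
 */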
/* Used in loopback tests */
typedef struct dhd_dmaxfer {
	dhd_dma_buf_t srcmem;
	dhd_dma_buf_t dstmem;
	/* ... remaining members elided ... */
} dhd_dmaxfer_t;
/**
 * msgbuf_ring : This object manages the host side ring that includes a DMA-able
 * buffer, the WR and RD indices, ring parameters such as the max number of items
 * and the length of each item, and other miscellaneous runtime state.
 * A msgbuf_ring may be used to represent a H2D or D2H common ring or a
 * H2D TxPost ring as specified in the PCIE FullDongle Spec.
 * Ring parameters are conveyed to the dongle, which maintains its own peer end
 * ring state. Depending on whether the DMA Indices feature is supported, the
 * host will update the WR/RD index in the DMA indices array in host memory or
 * directly in dongle memory.
 */
typedef struct msgbuf_ring {
	uint16		idx;		/* ring id */
	uint16		rd;		/* read index */
	uint16		curr_rd;	/* read index for debug */
	uint16		wr;		/* write index */
	uint16		max_items;	/* maximum number of items in ring */
	uint16		item_len;	/* length of each item in the ring */
	sh_addr_t	base_addr;	/* LITTLE ENDIAN formatted: base address */
	dhd_dma_buf_t	dma_buf;	/* DMA-able buffer: pa, va, len, dmah, secdma */
	uint32		seqnum;		/* next expected item's sequence number */
#ifdef TXP_FLUSH_NITEMS
	/* # of messages on ring not yet announced to dongle */
	uint16		pend_items_count;
#endif /* TXP_FLUSH_NITEMS */

	uint8		n_completion_ids;
	uint16		create_req_id;
	uint16		compeltion_ring_ids[MAX_COMPLETION_RING_IDS_ASSOCIATED];
	uchar		name[RING_NAME_MAX_LENGTH];
	uint32		ring_mem_allocated;
	/* ... remaining members elided ... */
} msgbuf_ring_t;
#define DHD_RING_BGN_VA(ring)	((ring)->dma_buf.va)
#define DHD_RING_END_VA(ring) \
	((uint8 *)(DHD_RING_BGN_VA((ring))) + \
	 (((ring)->max_items - 1) * (ring)->item_len))
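/*
 * Sketch (illustrative, not from the original source): the address of the
 * i'th work item in a ring follows directly from the two macros above.
 */
#if 0 /* usage sketch only */
static void *
msgbuf_ring_item_addr(msgbuf_ring_t *ring, uint16 i)
{
	ASSERT(i < ring->max_items);
	return (uint8 *)DHD_RING_BGN_VA(ring) + ((uint32)i * ring->item_len);
}
#endif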
/* This can be overwritten by module parameter defined in dhd_linux.c
 * or by dhd iovar h2d_max_txpost.
 */
int h2d_max_txpost = H2DRING_TXPOST_MAX_ITEM;
/** DHD protocol handle. Is an opaque type to other DHD software layers. */
typedef struct dhd_prot {
	osl_t		*osh;		/* OSL handle */
	uint16		max_rxbufpost;
	uint16		max_eventbufpost;
	uint16		max_ioctlrespbufpost;
	uint16		max_tsbufpost;
	uint16		max_infobufpost;
	uint16		cur_event_bufs_posted;
	uint16		cur_ioctlresp_bufs_posted;
	uint16		cur_ts_bufs_posted;

	/* Flow control mechanism based on active transmits pending */
	osl_atomic_t	active_tx_count; /* increments/decrements on every packet tx/tx_status */
	uint16		h2d_max_txpost;
	uint16		txp_threshold;	/* optimization to write "n" tx items at a time to ring */

	/* MsgBuf Ring info: has a dhd_dma_buf that is dynamically allocated */
	msgbuf_ring_t	h2dring_ctrl_subn;	/* H2D ctrl message submission ring */
	msgbuf_ring_t	h2dring_rxp_subn;	/* H2D RxBuf post ring */
	msgbuf_ring_t	d2hring_ctrl_cpln;	/* D2H ctrl completion ring */
	msgbuf_ring_t	d2hring_tx_cpln;	/* D2H Tx complete message ring */
	msgbuf_ring_t	d2hring_rx_cpln;	/* D2H Rx complete message ring */
	msgbuf_ring_t	*h2dring_info_subn;	/* H2D info submission ring */
	msgbuf_ring_t	*d2hring_info_cpln;	/* D2H info completion ring */

	msgbuf_ring_t	*h2d_flowrings_pool;	/* Pool of preallocated flowrings */
	dhd_dma_buf_t	flowrings_dma_buf;	/* Contiguous DMA buffer for flowrings */
	uint16		h2d_rings_total;	/* total H2D (common rings + flowrings) */

	uint32		rx_dataoffset;

	dhd_mb_ring_t	mb_ring_fn;	/* called when dongle needs to be notified of new msg */
	dhd_mb_ring_2_t	mb_2_ring_fn;	/* called when dongle needs to be notified of new msg */

	/* ioctl related resources */
	int16		ioctl_status;	/* status returned from dongle */
	uint16		ioctl_resplen;
	dhd_ioctl_recieved_status_t ioctl_received;

	dhd_dma_buf_t	retbuf;		/* For holding ioctl response */
	dhd_dma_buf_t	ioctbuf;	/* For holding ioctl request */

	dhd_dma_buf_t	d2h_dma_scratch_buf;	/* For holding d2h scratch */

	/* DMA-able arrays for holding WR and RD indices */
	uint32		rw_index_sz;		/* Size of a RD or WR index in dongle */
	dhd_dma_buf_t	h2d_dma_indx_wr_buf;	/* Array of H2D WR indices */
	dhd_dma_buf_t	h2d_dma_indx_rd_buf;	/* Array of H2D RD indices */
	dhd_dma_buf_t	d2h_dma_indx_wr_buf;	/* Array of D2H WR indices */
	dhd_dma_buf_t	d2h_dma_indx_rd_buf;	/* Array of D2H RD indices */
	dhd_dma_buf_t	h2d_ifrm_indx_wr_buf;	/* Array of H2D WR indices for ifrm */

	dhd_dma_buf_t	host_bus_throughput_buf; /* bus throughput measure buffer */

	dhd_dma_buf_t	*flowring_buf;	/* pool of flow ring buf */

	d2h_sync_cb_t	d2h_sync_cb;	/* Sync on D2H DMA done: SEQNUM or XORCSUM */
	ulong		d2h_sync_wait_max;	/* max number of wait loops to receive one msg */
	ulong		d2h_sync_wait_tot;	/* total wait loops */

	dhd_dmaxfer_t	dmaxfer;	/* for test/DMA loopback */

	uint16		ioctl_trans_id;
	void		*pktid_ctrl_map;	/* a pktid maps to a packet and its metadata */
	void		*pktid_rx_map;		/* pktid map for rx path */
	void		*pktid_tx_map;		/* pktid map for tx path */
	void		*pktid_map_handle_ioctl;
#ifdef DHD_MAP_PKTID_LOGGING
	void		*pktid_dma_map;		/* pktid map for DMA MAP */
	void		*pktid_dma_unmap;	/* pktid map for DMA UNMAP */
#endif /* DHD_MAP_PKTID_LOGGING */

	uint64		ioctl_fillup_time;	/* timestamp for ioctl fillup */
	uint64		ioctl_ack_time;		/* timestamp for ioctl ack */
	uint64		ioctl_cmplt_time;	/* timestamp for ioctl completion */

	/* Applications/utilities can read tx and rx metadata using IOVARs */
	uint16		rx_metadata_offset;
	uint16		tx_metadata_offset;

#if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
	/* Host's soft doorbell configuration */
	bcmpcie_soft_doorbell_t soft_doorbell[BCMPCIE_D2H_COMMON_MSGRINGS];
#endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */

	/* Work Queues to be used by the producer and the consumer, and threshold
	 * when the WRITE index must be synced to consumer's workq
	 */
#if defined(DHD_LB_TXC)
	uint32 tx_compl_prod_sync ____cacheline_aligned;
	bcm_workq_t tx_compl_prod, tx_compl_cons;
#endif /* DHD_LB_TXC */
#if defined(DHD_LB_RXC)
	uint32 rx_compl_prod_sync ____cacheline_aligned;
	bcm_workq_t rx_compl_prod, rx_compl_cons;
#endif /* DHD_LB_RXC */

	dhd_dma_buf_t	fw_trap_buf;	/* firmware trap buffer */

	uint32		host_ipc_version;	/* Host supported IPC rev */
	uint32		device_ipc_version;	/* FW supported IPC rev */
	uint32		active_ipc_version;	/* Host advertised IPC rev */
	dhd_dma_buf_t	hostts_req_buf;	/* For holding host timestamp request buf */
	bool		hostts_req_buf_inuse;
	bool		rx_ts_log_enabled;
	bool		tx_ts_log_enabled;
	/* ... remaining members elided ... */
} dhd_prot_t;
#ifdef DHD_DUMP_PCIE_RINGS
int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file,
	unsigned long *file_posn);
#endif /* DHD_DUMP_PCIE_RINGS */

extern bool dhd_timesync_delay_post_bufs(dhd_pub_t *dhdp);
extern void dhd_schedule_dmaxfer_free(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap);
/* Convert a dmaaddr_t to a base_addr with htol operations */
static INLINE void dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa);

/* APIs for managing a DMA-able buffer */
static int  dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
static int  dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len);
static void dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
static void dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
/* msgbuf ring management */
static int dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring,
	const char *name, uint16 max_items, uint16 len_item, uint16 ringid);
static void dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring);
static void dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring);
static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring);
static void dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void* buf);

/* Pool of pre-allocated msgbuf_ring_t with DMA-able buffers for Flowrings */
static int  dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd);
static void dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd);
static void dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd);
/* Fetch and Release a flowring msgbuf_ring from flowring pool */
static msgbuf_ring_t *dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd,
	uint16 flowid);
/* see also dhd_prot_flowrings_pool_release() in dhd_prot.h */
/* Producer: Allocate space in a msgbuf ring */
static void* dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
	uint16 nitems, uint16 *alloced, bool exactly_nitems);
static void* dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems,
	uint16 *alloced, bool exactly_nitems);
/* Consumer: Determine the location where the next message may be consumed */
static uint8* dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring,
	uint32 *available_len);
/* Producer (WR index update) or Consumer (RD index update) indication */
static void dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t *ring,
	void *p, uint16 len);
static void dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring);
static INLINE int dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
	dhd_dma_buf_t *dma_buf, uint32 bufsz);
/* Set/Get a RD or WR index in the array of indices */
/* See also: dhd_prot_dma_indx_init() */
void dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type,
	uint16 ringid);
static uint16 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid);
/* Locate a packet given a pktid */
static INLINE void *dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype,
	bool free_pktid);
/* Locate a packet given a PktId and free it. */
static INLINE void dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send);

static int dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
	void *buf, uint len, uint8 action);
static int dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
	void *buf, uint len, uint8 action);
static int dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf);
static int dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd,
	void *buf, int ifidx);
/* Post buffers for Rx, control ioctl response and events */
static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msgid, uint32 max_to_post);
static void dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub);
static void dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub);
static void dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid);
static int dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid);
static int dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *pub);

static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt);
/* D2H Message handling */
static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len);

/* D2H Message handlers */
static void dhd_prot_noop(dhd_pub_t *dhd, void *msg);
static void dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_event_process(dhd_pub_t *dhd, void *msg);
/* Loopback test with dongle */
static void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma);
static int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, uint srcdelay,
	uint destdelay, dhd_dmaxfer_t *dma);
static void dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg);
/* Flowring management communication with dongle */
static void dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg);
static void dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg);
#ifdef WL_MONITOR
extern bool dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx);
extern void dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx);
#endif /* WL_MONITOR */
/* Configure a soft doorbell per D2H ring */
static void dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd);

static void dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg);
static void dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf);
static void dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf);
static void dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf);
static void dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf);
static void dhd_prot_detach_info_rings(dhd_pub_t *dhd);
static void dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf);
static void dhd_prot_process_snapshot_complete(dhd_pub_t *dhd, void *buf);

typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void *msg);
/** callback functions for messages generated by the dongle */
#define MSG_TYPE_INVALID 0

static dhd_msgbuf_func_t table_lookup[DHD_PROT_FUNCS] = {
	dhd_prot_noop, /* 0 is MSG_TYPE_INVALID */
	dhd_prot_genstatus_process, /* MSG_TYPE_GEN_STATUS */
	dhd_prot_ringstatus_process, /* MSG_TYPE_RING_STATUS */
	NULL,
	dhd_prot_flow_ring_create_response_process, /* MSG_TYPE_FLOW_RING_CREATE_CMPLT */
	NULL,
	dhd_prot_flow_ring_delete_response_process, /* MSG_TYPE_FLOW_RING_DELETE_CMPLT */
	NULL,
	dhd_prot_flow_ring_flush_response_process, /* MSG_TYPE_FLOW_RING_FLUSH_CMPLT */
	NULL,
	dhd_prot_ioctack_process, /* MSG_TYPE_IOCTLPTR_REQ_ACK */
	NULL,
	dhd_prot_ioctcmplt_process, /* MSG_TYPE_IOCTL_CMPLT */
	NULL,
	dhd_prot_event_process, /* MSG_TYPE_WL_EVENT */
	NULL,
	dhd_prot_txstatus_process, /* MSG_TYPE_TX_STATUS */
	NULL,
	NULL, /* MSG_TYPE_RX_CMPLT use dedicated handler */
	NULL,
	dhd_msgbuf_dmaxfer_process, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */
	NULL, /* MSG_TYPE_FLOW_RING_RESUME */
	dhd_prot_process_flow_ring_resume_response, /* MSG_TYPE_FLOW_RING_RESUME_CMPLT */
	NULL, /* MSG_TYPE_FLOW_RING_SUSPEND */
	dhd_prot_process_flow_ring_suspend_response, /* MSG_TYPE_FLOW_RING_SUSPEND_CMPLT */
	NULL, /* MSG_TYPE_INFO_BUF_POST */
	dhd_prot_process_infobuf_complete, /* MSG_TYPE_INFO_BUF_CMPLT */
	NULL, /* MSG_TYPE_H2D_RING_CREATE */
	NULL, /* MSG_TYPE_D2H_RING_CREATE */
	dhd_prot_process_h2d_ring_create_complete, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */
	dhd_prot_process_d2h_ring_create_complete, /* MSG_TYPE_D2H_RING_CREATE_CMPLT */
	NULL, /* MSG_TYPE_H2D_RING_CONFIG */
	NULL, /* MSG_TYPE_D2H_RING_CONFIG */
	NULL, /* MSG_TYPE_H2D_RING_CONFIG_CMPLT */
	dhd_prot_process_d2h_ring_config_complete, /* MSG_TYPE_D2H_RING_CONFIG_CMPLT */
	NULL, /* MSG_TYPE_H2D_MAILBOX_DATA */
	dhd_prot_process_d2h_mb_data, /* MSG_TYPE_D2H_MAILBOX_DATA */
	NULL, /* MSG_TYPE_TIMSTAMP_BUFPOST */
	NULL, /* MSG_TYPE_HOSTTIMSTAMP */
	dhd_prot_process_d2h_host_ts_complete, /* MSG_TYPE_HOSTTIMSTAMP_CMPLT */
	dhd_prot_process_fw_timestamp, /* MSG_TYPE_FIRMWARE_TIMESTAMP */
	NULL, /* MSG_TYPE_SNAPSHOT_UPLOAD */
	dhd_prot_process_snapshot_complete, /* MSG_TYPE_SNAPSHOT_CMPLT */
};
#ifdef DHD_RX_CHAINING

#define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \
	(dhd_wet_chainable(dhd) && \
	dhd_rx_pkt_chainable((dhd), (ifidx)) && \
	!ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) && \
	!ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \
	!eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \
	!eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) && \
	((h_prio) == (prio)) && (dhd_ctf_hotbrc_check((dhd), (evh), (ifidx))) && \
	((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \
	(((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IPV6))))

static INLINE void BCMFASTPATH dhd_rxchain_reset(rxchain_info_t *rxchain);
static void BCMFASTPATH dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx);
static void BCMFASTPATH dhd_rxchain_commit(dhd_pub_t *dhd);

#define DHD_PKT_CTF_MAX_CHAIN_LEN	64

#endif /* DHD_RX_CHAINING */
#define DHD_LPBKDTDUMP_ON()	(dhd_msg_level & DHD_LPBKDTDUMP_VAL)

static void dhd_prot_h2d_sync_init(dhd_pub_t *dhd);
#ifdef D2H_MINIDUMP
dhd_dma_buf_t *
dhd_prot_get_minidump_buf(dhd_pub_t *dhd)
{
	return &dhd->prot->fw_trap_buf;
}
#endif /* D2H_MINIDUMP */
bool
dhd_prot_is_cmpl_ring_empty(dhd_pub_t *dhd, void *prot_info)
{
	msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)prot_info;
	uint16 rd, wr;
	bool ret;

	if (dhd->dma_d2h_ring_upd_support) {
		wr = flow_ring->wr;
	} else {
		dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
	}
	if (dhd->dma_h2d_ring_upd_support) {
		rd = flow_ring->rd;
	} else {
		dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
	}
	ret = (wr == rd) ? TRUE : FALSE;
	return ret;
}
void
dhd_prot_dump_ring_ptrs(void *prot_info)
{
	msgbuf_ring_t *ring = (msgbuf_ring_t *)prot_info;
	DHD_ERROR(("%s curr_rd: %d rd: %d wr: %d \n", __FUNCTION__,
		ring->curr_rd, ring->rd, ring->wr));
}
uint16
dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd)
{
	return (uint16)h2d_max_txpost;
}

void
dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost)
{
	h2d_max_txpost = max_txpost;
}
/**
 * D2H DMA to completion callback handlers. Based on the mode advertised by the
 * dongle through the PCIE shared region, the appropriate callback will be
 * registered in the proto layer to be invoked prior to processing any message
 * from a D2H DMA ring. If the dongle uses a read barrier or another mode that
 * does not require host participation, then a noop callback handler will be
 * bound that simply returns the msg_type.
 */
static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring,
	uint32 tries, volatile uchar *msg, int msglen);
static uint8 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
	volatile cmn_msg_hdr_t *msg, int msglen);
static uint8 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
	volatile cmn_msg_hdr_t *msg, int msglen);
static uint8 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
	volatile cmn_msg_hdr_t *msg, int msglen);
static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd);
static int dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
	uint16 ring_type, uint32 id);
static int dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
	uint8 type, uint32 id);
static uint16 dhd_get_max_flow_rings(dhd_pub_t *dhd);
/**
 * dhd_prot_d2h_sync_livelock - when the host determines that a DMA transfer has
 * not completed, a livelock condition occurs. Host will avert this livelock by
 * dropping this message and moving to the next. This dropped message can lead
 * to a packet leak, or even something disastrous in the case the dropped
 * message happens to be a control response.
 * Here we will log this condition. One may choose to reboot the dongle.
 */
static void
dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring, uint32 tries,
	volatile uchar *msg, int msglen)
{
	uint32 ring_seqnum = ring->seqnum;

	DHD_ERROR((
		"LIVELOCK DHD<%p> ring<%s> msg_seqnum<%u> ring_seqnum<%u:%u> tries<%u> max<%lu>"
		" tot<%lu> dma_buf va<%p> msg<%p> curr_rd<%d>\n",
		dhd, ring->name, msg_seqnum, ring_seqnum, ring_seqnum % D2H_EPOCH_MODULO, tries,
		dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot,
		ring->dma_buf.va, msg, ring->curr_rd));

	dhd_prhex("D2H MsgBuf Failure", msg, msglen, DHD_ERROR_VAL);

	dhd_bus_dump_console_buffer(dhd->bus);
	dhd_prot_debug_info_print(dhd);

#ifdef DHD_FW_COREDUMP
	if (dhd->memdump_enabled) {
		/* collect core dump */
		dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK;
		dhd_bus_mem_dump(dhd);
	}
#endif /* DHD_FW_COREDUMP */

	dhd_schedule_reset(dhd);

#ifdef SUPPORT_LINKDOWN_RECOVERY
#ifdef CONFIG_ARCH_MSM
	dhd->bus->no_cfg_restore = 1;
#endif /* CONFIG_ARCH_MSM */
	dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK;
	dhd_os_send_hang_message(dhd);
#endif /* SUPPORT_LINKDOWN_RECOVERY */
}
/**
 * dhd_prot_d2h_sync_seqnum - Sync on a D2H DMA completion using the SEQNUM
 * mode. Sequence number is always in the last word of a message.
 */
static uint8 BCMFASTPATH
dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
	volatile cmn_msg_hdr_t *msg, int msglen)
{
	uint32 tries;
	uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
	int num_words = msglen / sizeof(uint32); /* num of 32bit words */
	volatile uint32 *marker = (volatile uint32 *)msg + (num_words - 1); /* last word */
	dhd_prot_t *prot = dhd->prot;
	uint32 msg_seqnum;
	uint32 step = 0;
	uint32 delay = PCIE_D2H_SYNC_DELAY;
	uint32 total_tries = 0;

	ASSERT(msglen == ring->item_len);

	BCM_REFERENCE(delay);
	/*
	 * For retries we have to make some sort of stepper algorithm.
	 * We see that every time when the Dongle comes out of the D3
	 * Cold state, the first D2H mem2mem DMA takes more time to
	 * complete, leading to livelock issues.
	 *
	 * Case 1 - Apart from Host CPU some other bus master is
	 * accessing the DDR port, probably page close to the ring
	 * so, PCIE does not get a chance to update the memory.
	 * Solution - Increase the number of tries.
	 *
	 * Case 2 - The 50usec delay given by the Host CPU is not
	 * sufficient for the PCIe RC to start its work.
	 * In this case the breathing time of 50usec given by
	 * the Host CPU is not sufficient.
	 * Solution: Increase the delay in a stepper fashion.
	 * This is done to ensure that there is no
	 * unwanted extra delay introduced in normal conditions.
	 */
	for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
		for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
			msg_seqnum = *marker;
			if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma upto last word done */
				ring->seqnum++; /* next expected sequence number */
				goto dma_completed;
			}

			total_tries = (uint32)(((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries);

			if (total_tries > prot->d2h_sync_wait_max)
				prot->d2h_sync_wait_max = total_tries;

			OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
			OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
			OSL_DELAY(delay * step); /* Add stepper delay */

		} /* for PCIE_D2H_SYNC_WAIT_TRIES */
	} /* for PCIE_D2H_SYNC_NUM_OF_STEPS */

	dhd_prot_d2h_sync_livelock(dhd, msg_seqnum, ring, total_tries,
		(volatile uchar *) msg, msglen);

	ring->seqnum++; /* skip this message ... leak of a pktid */
	return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */

dma_completed:

	prot->d2h_sync_wait_tot += tries;
	return msg->msg_type;
}
/**
 * dhd_prot_d2h_sync_xorcsum - Sync on a D2H DMA completion using the XORCSUM
 * mode. The xorcsum is placed in the last word of a message. Dongle will also
 * place a seqnum in the epoch field of the cmn_msg_hdr.
 */
static uint8 BCMFASTPATH
dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
	volatile cmn_msg_hdr_t *msg, int msglen)
{
	uint32 tries;
	uint32 prot_checksum = 0; /* computed checksum */
	int num_words = msglen / sizeof(uint32); /* num of 32bit words */
	uint8 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
	dhd_prot_t *prot = dhd->prot;
	uint32 step = 0;
	uint32 delay = PCIE_D2H_SYNC_DELAY;
	uint32 total_tries = 0;

	ASSERT(msglen == ring->item_len);

	BCM_REFERENCE(delay);
	/*
	 * For retries we have to make some sort of stepper algorithm.
	 * We see that every time when the Dongle comes out of the D3
	 * Cold state, the first D2H mem2mem DMA takes more time to
	 * complete, leading to livelock issues.
	 *
	 * Case 1 - Apart from Host CPU some other bus master is
	 * accessing the DDR port, probably page close to the ring
	 * so, PCIE does not get a chance to update the memory.
	 * Solution - Increase the number of tries.
	 *
	 * Case 2 - The 50usec delay given by the Host CPU is not
	 * sufficient for the PCIe RC to start its work.
	 * In this case the breathing time of 50usec given by
	 * the Host CPU is not sufficient.
	 * Solution: Increase the delay in a stepper fashion.
	 * This is done to ensure that there is no
	 * unwanted extra delay introduced in normal conditions.
	 */
	for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
		for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
			/* First verify that the seqnum has been updated;
			 * only then check the xorcsum.
			 * Once the seqnum and xorcsum are proper, the
			 * complete message has arrived.
			 */
			if (msg->epoch == ring_seqnum) {
				prot_checksum = bcm_compute_xor32((volatile uint32 *)msg,
					num_words);
				if (prot_checksum == 0U) { /* checksum is OK */
					ring->seqnum++; /* next expected sequence number */
					goto dma_completed;
				}
			}

			total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;

			if (total_tries > prot->d2h_sync_wait_max)
				prot->d2h_sync_wait_max = total_tries;

			OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
			OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
			OSL_DELAY(delay * step); /* Add stepper delay */

		} /* for PCIE_D2H_SYNC_WAIT_TRIES */
	} /* for PCIE_D2H_SYNC_NUM_OF_STEPS */

	DHD_ERROR(("%s: prot_checksum = 0x%x\n", __FUNCTION__, prot_checksum));
	dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries,
		(volatile uchar *) msg, msglen);

	ring->seqnum++; /* skip this message ... leak of a pktid */
	return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */

dma_completed:

	prot->d2h_sync_wait_tot += tries;
	return msg->msg_type;
}
/**
 * dhd_prot_d2h_sync_none - Dongle ensures that the DMA will complete and the
 * host need not try to sync. This noop sync handler will be bound when the
 * dongle advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is
 * required.
 */
static uint8 BCMFASTPATH
dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
	volatile cmn_msg_hdr_t *msg, int msglen)
{
	return msg->msg_type;
}
void
dhd_wakeup_ioctl_event(dhd_pub_t *dhd, dhd_ioctl_recieved_status_t reason)
{
	/* To synchronize with the previous memory operations call wmb() */
	OSL_SMP_WMB();
	dhd->prot->ioctl_received = reason;
	/* Call another wmb() to make sure the event value gets updated before waking up */
	OSL_SMP_WMB();
	dhd_os_ioctl_resp_wake(dhd);
}
/**
 * dhd_prot_d2h_sync_init - Setup the host side DMA sync mode based on what
 * the dongle advertises.
 */
static void
dhd_prot_d2h_sync_init(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;
	prot->d2h_sync_wait_max = 0UL;
	prot->d2h_sync_wait_tot = 0UL;

	prot->d2hring_ctrl_cpln.seqnum = D2H_EPOCH_INIT_VAL;
	prot->d2hring_ctrl_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;

	prot->d2hring_tx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
	prot->d2hring_tx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;

	prot->d2hring_rx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
	prot->d2hring_rx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;

	if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) {
		prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum;
		DHD_ERROR(("%s(): D2H sync mechanism is SEQNUM \r\n", __FUNCTION__));
	} else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) {
		prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum;
		DHD_ERROR(("%s(): D2H sync mechanism is XORCSUM \r\n", __FUNCTION__));
	} else {
		prot->d2h_sync_cb = dhd_prot_d2h_sync_none;
		DHD_ERROR(("%s(): D2H sync mechanism is NONE \r\n", __FUNCTION__));
	}
}
/**
 * dhd_prot_h2d_sync_init - Per H2D common ring, setup the msgbuf ring seqnum
 */
static void
dhd_prot_h2d_sync_init(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;
	prot->h2dring_rxp_subn.seqnum = H2D_EPOCH_INIT_VAL;
	prot->h2dring_rxp_subn.current_phase = 0;

	prot->h2dring_ctrl_subn.seqnum = H2D_EPOCH_INIT_VAL;
	prot->h2dring_ctrl_subn.current_phase = 0;
}

/* +----------------- End of PCIE DHD H2D DMA SYNC ------------------------+ */
/*
 * +---------------------------------------------------------------------------+
 * PCIE DMA-able buffer. Sets up a dhd_dma_buf_t object, which includes the
 * virtual and physical address, the buffer length and the DMA handler.
 * A secdma handler is also included in the dhd_dma_buf object.
 * +---------------------------------------------------------------------------+
 */

static INLINE void
dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa)
{
	base_addr->low_addr = htol32(PHYSADDRLO(pa));
	base_addr->high_addr = htol32(PHYSADDRHI(pa));
}
/**
 * dhd_dma_buf_audit - Any audits on a DHD DMA Buffer.
 */
static int
dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
{
	uint32 pa_lowaddr, end; /* dongle uses 32bit ptr arithmetic */

	ASSERT(dma_buf);
	pa_lowaddr = PHYSADDRLO(dma_buf->pa);
	ASSERT(PHYSADDRLO(dma_buf->pa) || PHYSADDRHI(dma_buf->pa));
	ASSERT(ISALIGNED(pa_lowaddr, DMA_ALIGN_LEN));
	ASSERT(dma_buf->len != 0);

	/* test 32bit offset arithmetic over dma buffer for loss of carry-over */
	end = (pa_lowaddr + dma_buf->len); /* end address */

	if ((end & 0xFFFFFFFF) < (pa_lowaddr & 0xFFFFFFFF)) { /* exclude carryover */
		DHD_ERROR(("%s: dma_buf %x len %d spans dongle 32bit ptr arithmetic\n",
			__FUNCTION__, pa_lowaddr, dma_buf->len));
		return BCME_ERROR;
	}

	return BCME_OK;
}
/**
 * dhd_dma_buf_alloc - Allocate a cache coherent DMA-able buffer.
 * returns BCME_OK=0 on success
 * returns non-zero negative error value on failure.
 */
static int
dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len)
{
	uint32 dma_pad = 0;
	osl_t *osh = dhd->osh;
	uint16 dma_align = DMA_ALIGN_LEN;

	ASSERT(dma_buf != NULL);
	ASSERT(dma_buf->va == NULL);
	ASSERT(dma_buf->len == 0);

	/* Pad the buffer length by one extra cacheline size.
	 * Required for D2H direction.
	 */
	dma_pad = (buf_len % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
	dma_buf->va = DMA_ALLOC_CONSISTENT(osh, buf_len + dma_pad,
		dma_align, &dma_buf->_alloced, &dma_buf->pa, &dma_buf->dmah);

	if (dma_buf->va == NULL) {
		DHD_ERROR(("%s: buf_len %d, no memory available\n",
			__FUNCTION__, buf_len));
		return BCME_NOMEM;
	}

	dma_buf->len = buf_len; /* not including padded len */

	if (dhd_dma_buf_audit(dhd, dma_buf) != BCME_OK) { /* audit dma buf */
		dhd_dma_buf_free(dhd, dma_buf);
		return BCME_ERROR;
	}

	dhd_dma_buf_reset(dhd, dma_buf); /* zero out and cache flush */

	return BCME_OK;
}
/**
 * dhd_dma_buf_reset - Reset a cache coherent DMA-able buffer.
 */
static void
dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
{
	if ((dma_buf == NULL) || (dma_buf->va == NULL))
		return;

	(void)dhd_dma_buf_audit(dhd, dma_buf);

	/* Zero out the entire buffer and cache flush */
	memset((void*)dma_buf->va, 0, dma_buf->len);
	OSL_CACHE_FLUSH((void *)dma_buf->va, dma_buf->len);
}
/**
 * dhd_dma_buf_free - Free a DMA-able buffer that was previously allocated using
 * dhd_dma_buf_alloc().
 */
static void
dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
{
	osl_t *osh = dhd->osh;

	ASSERT(dma_buf);

	if (dma_buf->va == NULL)
		return; /* Allow for free invocation, when alloc failed */

	/* DEBUG: dhd_dma_buf_reset(dhd, dma_buf) */
	(void)dhd_dma_buf_audit(dhd, dma_buf);

	/* dma buffer may have been padded at allocation */
	DMA_FREE_CONSISTENT(osh, dma_buf->va, dma_buf->_alloced,
		dma_buf->pa, dma_buf->dmah);

	memset(dma_buf, 0, sizeof(dhd_dma_buf_t));
}
/**
 * dhd_dma_buf_init - Initialize a dhd_dma_buf with specified values.
 * Do not use dhd_dma_buf_init to zero out a dhd_dma_buf_t object. Use memset 0.
 */
void
dhd_dma_buf_init(dhd_pub_t *dhd, void *dhd_dma_buf,
	void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma)
{
	dhd_dma_buf_t *dma_buf;
	ASSERT(dhd_dma_buf);
	dma_buf = (dhd_dma_buf_t *)dhd_dma_buf;
	dma_buf->va = va;
	dma_buf->len = len;
	dma_buf->pa = pa;
	dma_buf->dmah = dmah;
	dma_buf->secdma = secdma;

	/* Audit user defined configuration */
	(void)dhd_dma_buf_audit(dhd, dma_buf);
}

/* +------------------ End of PCIE DHD DMA BUF ADT ------------------------+ */
/*
 * +---------------------------------------------------------------------------+
 * DHD_MAP_PKTID_LOGGING
 * Logging the PKTID and DMA map/unmap information for the SMMU fault issue
 * debugging in customer platform.
 * +---------------------------------------------------------------------------+
 */
#ifdef DHD_MAP_PKTID_LOGGING
typedef struct dhd_pktid_log_item {
	dmaaddr_t	pa;		/* DMA bus address */
	uint64		ts_nsec;	/* Timestamp: nsec */
	uint32		size;		/* DMA map/unmap size */
	uint32		pktid;		/* Packet ID */
	uint8		pkttype;	/* Packet Type */
	uint8		rsvd[7];	/* Reserved for future use */
} dhd_pktid_log_item_t;

typedef struct dhd_pktid_log {
	uint32		items;		/* number of total items */
	uint32		index;		/* index of pktid_log_item */
	dhd_pktid_log_item_t map[0];	/* metadata storage */
} dhd_pktid_log_t;

typedef void * dhd_pktid_log_handle_t; /* opaque handle to pktid log */

#define MAX_PKTID_LOG			(2048)
#define DHD_PKTID_LOG_ITEM_SZ		(sizeof(dhd_pktid_log_item_t))
#define DHD_PKTID_LOG_SZ(items)		(uint32)((sizeof(dhd_pktid_log_t)) + \
					((DHD_PKTID_LOG_ITEM_SZ) * (items)))
#define DHD_PKTID_LOG_INIT(dhd, hdl)	dhd_pktid_logging_init((dhd), (hdl))
#define DHD_PKTID_LOG_FINI(dhd, hdl)	dhd_pktid_logging_fini((dhd), (hdl))
#define DHD_PKTID_LOG(dhd, hdl, pa, pktid, len, pkttype) \
	dhd_pktid_logging((dhd), (hdl), (pa), (pktid), (len), (pkttype))
#define DHD_PKTID_LOG_DUMP(dhd)		dhd_pktid_logging_dump((dhd))
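/*
 * Usage sketch (illustrative, not from the original source): record a DMA map
 * and a DMA unmap event against the two logs held in dhd_prot
 * (pktid_dma_map / pktid_dma_unmap), then dump both on a fault.
 */
#if 0 /* usage sketch only */
DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_map, pa, pktid, len, pkttype);
DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap, pa, pktid, len, pkttype);
DHD_PKTID_LOG_DUMP(dhd);	/* e.g. from an SMMU fault handler */
#endif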
static dhd_pktid_log_handle_t *
dhd_pktid_logging_init(dhd_pub_t *dhd, uint32 num_items)
{
	dhd_pktid_log_t *log;
	uint32 log_size;

	log_size = DHD_PKTID_LOG_SZ(num_items);
	log = (dhd_pktid_log_t *)MALLOCZ(dhd->osh, log_size);
	if (log == NULL) {
		DHD_ERROR(("%s: MALLOC failed for size %d\n",
			__FUNCTION__, log_size));
		return (dhd_pktid_log_handle_t *)NULL;
	}

	log->items = num_items;
	log->index = 0;

	return (dhd_pktid_log_handle_t *)log; /* opaque handle */
}
static void
dhd_pktid_logging_fini(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle)
{
	dhd_pktid_log_t *log;
	uint32 log_size;

	if (handle == NULL) {
		DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__));
		return;
	}

	log = (dhd_pktid_log_t *)handle;
	log_size = DHD_PKTID_LOG_SZ(log->items);
	MFREE(dhd->osh, handle, log_size);
}
static void
dhd_pktid_logging(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle, dmaaddr_t pa,
	uint32 pktid, uint32 len, uint8 pkttype)
{
	dhd_pktid_log_t *log;
	uint32 idx;

	if (handle == NULL) {
		DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__));
		return;
	}

	log = (dhd_pktid_log_t *)handle;
	idx = log->index;
	log->map[idx].ts_nsec = OSL_LOCALTIME_NS();
	log->map[idx].pa = pa;
	log->map[idx].pktid = pktid;
	log->map[idx].size = len;
	log->map[idx].pkttype = pkttype;
	log->index = (idx + 1) % (log->items); /* update index */
}
void
dhd_pktid_logging_dump(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;
	dhd_pktid_log_t *map_log, *unmap_log;
	uint64 ts_sec, ts_usec;

	if (prot == NULL) {
		DHD_ERROR(("%s: prot is NULL\n", __FUNCTION__));
		return;
	}

	map_log = (dhd_pktid_log_t *)(prot->pktid_dma_map);
	unmap_log = (dhd_pktid_log_t *)(prot->pktid_dma_unmap);
	OSL_GET_LOCALTIME(&ts_sec, &ts_usec);
	if (map_log && unmap_log) {
		DHD_ERROR(("%s: map_idx=%d unmap_idx=%d "
			"current time=[%5lu.%06lu]\n", __FUNCTION__,
			map_log->index, unmap_log->index,
			(unsigned long)ts_sec, (unsigned long)ts_usec));
		DHD_ERROR(("%s: pktid_map_log(pa)=0x%llx size=%d, "
			"pktid_unmap_log(pa)=0x%llx size=%d\n", __FUNCTION__,
			(uint64)__virt_to_phys((ulong)(map_log->map)),
			(uint32)(DHD_PKTID_LOG_ITEM_SZ * map_log->items),
			(uint64)__virt_to_phys((ulong)(unmap_log->map)),
			(uint32)(DHD_PKTID_LOG_ITEM_SZ * unmap_log->items)));
	}
}
#endif /* DHD_MAP_PKTID_LOGGING */

/* +----------------- End of DHD_MAP_PKTID_LOGGING -----------------------+ */
/*
 * +---------------------------------------------------------------------------+
 * PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
 * Main purpose is to save memory on the dongle, has other purposes as well.
 * The packet id map, also includes storage for some packet parameters that
 * may be saved. A native packet pointer along with the parameters may be saved
 * and a unique 32bit pkt id will be returned. Later, the saved packet pointer
 * and the metadata may be retrieved using the previously allocated packet id.
 * +---------------------------------------------------------------------------+
 */
#define DHD_PCIE_PKTID
#define MAX_CTRL_PKTID		(1024)	/* Maximum number of pktids supported */
#define MAX_RX_PKTID		(1024)
#define MAX_TX_PKTID		(3072 * 2)
/* On Router, the pktptr serves as a pktid. */

#if defined(PROP_TXSTATUS) && !defined(DHD_PCIE_PKTID)
#error "PKTIDMAP must be supported with PROP_TXSTATUS/WLFC"
#endif
/* Enum for marking the buffer color based on usage */
typedef enum dhd_pkttype {
	PKTTYPE_DATA_TX = 0,
	PKTTYPE_DATA_RX,
	PKTTYPE_IOCTL_RX,
	PKTTYPE_EVENT_RX,
	PKTTYPE_INFO_RX,
	/* dhd_prot_pkt_free no check, if pktid reserved and no space avail case */
	PKTTYPE_NO_CHECK,
	PKTTYPE_TSBUF_RX
} dhd_pkttype_t;
#define DHD_PKTID_INVALID		(0U)
#define DHD_IOCTL_REQ_PKTID		(0xFFFE)
#define DHD_FAKE_PKTID			(0xFACE)
#define DHD_H2D_DBGRING_REQ_PKTID	0xFFFD
#define DHD_D2H_DBGRING_REQ_PKTID	0xFFFC
#define DHD_H2D_HOSTTS_REQ_PKTID	0xFFFB
#define DHD_H2D_BTLOGRING_REQ_PKTID	0xFFFA
#define DHD_D2H_BTLOGRING_REQ_PKTID	0xFFF9
#define DHD_H2D_SNAPSHOT_UPLOAD_REQ_PKTID	0xFFF8

#define IS_FLOWRING(ring) \
	((strncmp(ring->name, "h2dflr", sizeof("h2dflr"))) == (0))
typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */

/* Construct a packet id mapping table, returning an opaque map handle */
static dhd_pktid_map_handle_t *dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items);

/* Destroy a packet id mapping table, freeing all packets active in the table */
static void dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map);

#define DHD_NATIVE_TO_PKTID_INIT(dhd, items)	dhd_pktid_map_init((dhd), (items))
#define DHD_NATIVE_TO_PKTID_RESET(dhd, map)	dhd_pktid_map_reset((dhd), (map))
#define DHD_NATIVE_TO_PKTID_FINI(dhd, map)	dhd_pktid_map_fini((dhd), (map))
#define DHD_NATIVE_TO_PKTID_FINI_IOCTL(osh, map)	dhd_pktid_map_fini_ioctl((osh), (map))
#if defined(MACOSX_DHD)
#undef DHD_PCIE_PKTID
#define DHD_PCIE_PKTID 1
#endif /* MACOSX_DHD */

#if defined(DHD_PCIE_PKTID)
#if defined(MACOSX_DHD)
#define IOCTLRESP_USE_CONSTMEM
static void free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
static int alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
#endif /* MACOSX_DHD */
/* Determine number of pktids that are available */
static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle);

/* Allocate a unique pktid against which a pkt and some metadata is saved */
static INLINE uint32 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
	void *pkt, dhd_pkttype_t pkttype);
static INLINE void dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
	void *pkt, uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dma,
	void *dmah, void *secdma, dhd_pkttype_t pkttype);
static uint32 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
	void *pkt, dmaaddr_t pa, uint32 len, uint8 dma,
	void *dmah, void *secdma, dhd_pkttype_t pkttype);

/* Return an allocated pktid, retrieving previously saved pkt and metadata */
static void *dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
	uint32 id, dmaaddr_t *pa, uint32 *len, void **dmah,
	void **secdma, dhd_pkttype_t pkttype, bool rsv_locker);
/*
 * DHD_PKTID_AUDIT_ENABLED: Audit of PktIds in DHD for duplicate alloc and frees
 *
 * DHD_PKTID_AUDIT_MAP: Audit the LIFO or FIFO PktIdMap allocator
 * DHD_PKTID_AUDIT_RING: Audit the pktid during producer/consumer ring operation
 *
 * CAUTION: When DHD_PKTID_AUDIT_ENABLED is defined,
 *    either DHD_PKTID_AUDIT_MAP or DHD_PKTID_AUDIT_RING may be selected.
 */
#if defined(DHD_PKTID_AUDIT_ENABLED)
#define USE_DHD_PKTID_AUDIT_LOCK 1
/* Audit the pktidmap allocator */
/* #define DHD_PKTID_AUDIT_MAP */

/* Audit the pktid during production/consumption of workitems */
#define DHD_PKTID_AUDIT_RING

#if defined(DHD_PKTID_AUDIT_MAP) && defined(DHD_PKTID_AUDIT_RING)
#error "May only enable audit of MAP or RING, at a time."
#endif /* DHD_PKTID_AUDIT_MAP && DHD_PKTID_AUDIT_RING */

#define DHD_DUPLICATE_ALLOC	1
#define DHD_DUPLICATE_FREE	2
#define DHD_TEST_IS_ALLOC	3
#define DHD_TEST_IS_FREE	4

typedef enum dhd_pktid_map_type {
	DHD_PKTID_MAP_TYPE_CTRL = 1,
	DHD_PKTID_MAP_TYPE_TX,
	DHD_PKTID_MAP_TYPE_RX,
	DHD_PKTID_MAP_TYPE_UNKNOWN
} dhd_pktid_map_type_t;

#ifdef USE_DHD_PKTID_AUDIT_LOCK
#define DHD_PKTID_AUDIT_LOCK_INIT(osh)		dhd_os_spin_lock_init(osh)
#define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock)	dhd_os_spin_lock_deinit(osh, lock)
#define DHD_PKTID_AUDIT_LOCK(lock)		dhd_os_spin_lock(lock)
#define DHD_PKTID_AUDIT_UNLOCK(lock, flags)	dhd_os_spin_unlock(lock, flags)
#else
#define DHD_PKTID_AUDIT_LOCK_INIT(osh)		(void *)(1)
#define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock)	do { /* noop */ } while (0)
#define DHD_PKTID_AUDIT_LOCK(lock)		0
#define DHD_PKTID_AUDIT_UNLOCK(lock, flags)	do { /* noop */ } while (0)
#endif /* !USE_DHD_PKTID_AUDIT_LOCK */

#endif /* DHD_PKTID_AUDIT_ENABLED */
#define USE_DHD_PKTID_LOCK	1

#ifdef USE_DHD_PKTID_LOCK
#define DHD_PKTID_LOCK_INIT(osh)		dhd_os_spin_lock_init(osh)
#define DHD_PKTID_LOCK_DEINIT(osh, lock)	dhd_os_spin_lock_deinit(osh, lock)
#define DHD_PKTID_LOCK(lock, flags)		(flags) = dhd_os_spin_lock(lock)
#define DHD_PKTID_UNLOCK(lock, flags)		dhd_os_spin_unlock(lock, flags)
#else
#define DHD_PKTID_LOCK_INIT(osh)		(void *)(1)
#define DHD_PKTID_LOCK_DEINIT(osh, lock) \
	do { \
		BCM_REFERENCE(osh); \
		BCM_REFERENCE(lock); \
	} while (0)
#define DHD_PKTID_LOCK(lock)			0
#define DHD_PKTID_UNLOCK(lock, flags) \
	do { \
		BCM_REFERENCE(lock); \
		BCM_REFERENCE(flags); \
	} while (0)
#endif /* !USE_DHD_PKTID_LOCK */
typedef enum dhd_locker_state {
	LOCKER_IS_FREE,
	LOCKER_IS_BUSY,
	LOCKER_IS_RSVD
} dhd_locker_state_t;
/* Packet metadata saved in packet id mapper */

typedef struct dhd_pktid_item {
	dhd_locker_state_t state;	/* tag a locker to be free, busy or reserved */
	uint8		dir;		/* dma map direction (Tx=flush or Rx=invalidate) */
	dhd_pkttype_t	pkttype;	/* pktlists are maintained based on pkttype */
	uint16		len;		/* length of mapped packet's buffer */
	void		*pkt;		/* opaque native pointer to a packet */
	dmaaddr_t	pa;		/* physical address of mapped packet's buffer */
	void		*dmah;		/* handle to OS specific DMA map */
	void		*secdma;
} dhd_pktid_item_t;

typedef uint32 dhd_pktid_key_t;
typedef struct dhd_pktid_map {
	uint32		items;		/* total items in map */
	uint32		avail;		/* total available items */
	int		failures;	/* lockers unavailable count */
	/* Spinlock to protect dhd_pktid_map in process/tasklet context */
	void		*pktid_lock;	/* Used when USE_DHD_PKTID_LOCK is defined */

#if defined(DHD_PKTID_AUDIT_ENABLED)
	void		*pktid_audit_lock;
	struct bcm_mwbmap *pktid_audit;	/* multi word bitmap based audit */
#endif /* DHD_PKTID_AUDIT_ENABLED */
	dhd_pktid_key_t	*keys;		/* map_items +1 unique pkt ids */
	dhd_pktid_item_t lockers[0];	/* metadata storage */
} dhd_pktid_map_t;
1480 * PktId (Locker) #0 is never allocated and is considered invalid.
1482 * On request for a pktid, a value DHD_PKTID_INVALID must be treated as a
1483 * depleted pktid pool and must not be used by the caller.
1485 * Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID.
#define DHD_PKTID_FREE_LOCKER	(FALSE)
#define DHD_PKTID_RSV_LOCKER	(TRUE)

#define DHD_PKTID_ITEM_SZ	(sizeof(dhd_pktid_item_t))
#define DHD_PKIDMAP_ITEMS(items)	(items)
#define DHD_PKTID_MAP_SZ(items)	(sizeof(dhd_pktid_map_t) + \
	(DHD_PKTID_ITEM_SZ * ((items) + 1)))
#define DHD_PKTIDMAP_KEYS_SZ(items)	(sizeof(dhd_pktid_key_t) * ((items) + 1))

#define DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, map)	dhd_pktid_map_reset_ioctl((dhd), (map))

/* Convert a packet to a pktid, and save pkt pointer in busy locker */
#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype) \
	dhd_pktid_map_reserve((dhd), (map), (pkt), (pkttype))
/* Reuse a previously reserved locker to save packet params */
#define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dir, dmah, secdma, pkttype) \
	dhd_pktid_map_save((dhd), (map), (void *)(pkt), (nkey), (pa), (uint32)(len), \
		(uint8)(dir), (void *)(dmah), (void *)(secdma), \
		(dhd_pkttype_t)(pkttype))
/* Convert a packet to a pktid, and save packet params in locker */
#define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dir, dmah, secdma, pkttype) \
	dhd_pktid_map_alloc((dhd), (map), (void *)(pkt), (pa), (uint32)(len), \
		(uint8)(dir), (void *)(dmah), (void *)(secdma), \
		(dhd_pkttype_t)(pkttype))

/* Convert pktid to a packet, and free the locker */
#define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
	dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
		(dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
		(void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_FREE_LOCKER)

/* Convert the pktid to a packet, empty locker, but keep it reserved */
#define DHD_PKTID_TO_NATIVE_RSV(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
	dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
		(dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
		(void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_RSV_LOCKER)

#define DHD_PKTID_AVAIL(map)	dhd_pktid_map_avail_cnt(map)
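/*
 * Illustrative caller-side sketch (hypothetical caller, not part of this
 * driver): every allocation through DHD_NATIVE_TO_PKTID must be checked
 * against DHD_PKTID_INVALID before the pktid is placed in a work item.
 *
 *   uint32 pktid = DHD_NATIVE_TO_PKTID(dhd, prot->pktid_tx_map, pkt,
 *           pa, pktlen, DMA_TX, dmah, secdma, PKTTYPE_DATA_TX);
 *   if (pktid == DHD_PKTID_INVALID) {
 *       // pool depleted: do not post the work item; back off and retry
 *   }
 */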
#if defined(DHD_PKTID_AUDIT_ENABLED)

static int
dhd_get_pktid_map_type(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map)
{
	dhd_prot_t *prot = dhd->prot;
	int pktid_map_type;

	if (pktid_map == prot->pktid_ctrl_map) {
		pktid_map_type = DHD_PKTID_MAP_TYPE_CTRL;
	} else if (pktid_map == prot->pktid_tx_map) {
		pktid_map_type = DHD_PKTID_MAP_TYPE_TX;
	} else if (pktid_map == prot->pktid_rx_map) {
		pktid_map_type = DHD_PKTID_MAP_TYPE_RX;
	} else {
		pktid_map_type = DHD_PKTID_MAP_TYPE_UNKNOWN;
	}

	return pktid_map_type;
}
/**
 * __dhd_pktid_audit - Use the mwbmap to audit validity of a pktid.
 */
static int
__dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
	const int test_for, const char *errmsg)
{
#define DHD_PKT_AUDIT_STR "ERROR: %16s Host PktId Audit: "
	struct bcm_mwbmap *handle;
	unsigned long flags;
	bool ignore_audit;
	int error = BCME_OK;

	if (pktid_map == (dhd_pktid_map_t *)NULL) {
		DHD_ERROR((DHD_PKT_AUDIT_STR "Pkt id map NULL\n", errmsg));
		return BCME_OK;
	}

	flags = DHD_PKTID_AUDIT_LOCK(pktid_map->pktid_audit_lock);

	handle = pktid_map->pktid_audit;
	if (handle == (struct bcm_mwbmap *)NULL) {
		DHD_ERROR((DHD_PKT_AUDIT_STR "Handle NULL\n", errmsg));
		goto out;
	}

	/* Exclude special pktids from audit */
	ignore_audit = (pktid == DHD_IOCTL_REQ_PKTID) | (pktid == DHD_FAKE_PKTID);
	if (ignore_audit) {
		goto out;
	}

	if ((pktid == DHD_PKTID_INVALID) || (pktid > pktid_map->items)) {
		DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> invalid\n", errmsg, pktid));
		error = BCME_ERROR;
		goto out;
	}

	/* Perform the audit requested by the caller */
	switch (test_for) {
		case DHD_DUPLICATE_ALLOC:
			if (!bcm_mwbmap_isfree(handle, pktid)) {
				DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> alloc duplicate\n",
					errmsg, pktid));
				error = BCME_ERROR;
			} else {
				bcm_mwbmap_force(handle, pktid);
			}
			break;

		case DHD_DUPLICATE_FREE:
			if (bcm_mwbmap_isfree(handle, pktid)) {
				DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> free duplicate\n",
					errmsg, pktid));
				error = BCME_ERROR;
			} else {
				bcm_mwbmap_free(handle, pktid);
			}
			break;

		case DHD_TEST_IS_ALLOC:
			if (bcm_mwbmap_isfree(handle, pktid)) {
				DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not allocated\n",
					errmsg, pktid));
				error = BCME_ERROR;
			}
			break;

		case DHD_TEST_IS_FREE:
			if (!bcm_mwbmap_isfree(handle, pktid)) {
				DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not free",
					errmsg, pktid));
				error = BCME_ERROR;
			}
			break;

		default:
			DHD_ERROR(("%s: Invalid test case: %d\n", __FUNCTION__, test_for));
			error = BCME_ERROR;
			break;
	}

out:
	DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
	return error;
}
static int
dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
	const int test_for, const char *errmsg)
{
	int ret = BCME_OK;

	ret = __dhd_pktid_audit(dhd, pktid_map, pktid, test_for, errmsg);
	if (ret == BCME_ERROR) {
		DHD_ERROR(("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n",
			__FUNCTION__, pktid, dhd_get_pktid_map_type(dhd, pktid_map)));
		dhd_pktid_error_handler(dhd);
	}

	return ret;
}

#define DHD_PKTID_AUDIT(dhdp, map, pktid, test_for) \
	dhd_pktid_audit((dhdp), (dhd_pktid_map_t *)(map), (pktid), (test_for), __FUNCTION__)
static int
dhd_pktid_audit_ring_debug(dhd_pub_t *dhdp, dhd_pktid_map_t *map, uint32 pktid,
	const int test_for, void *msg, uint32 msg_len, const char *func)
{
	int ret = BCME_OK;

	ret = __dhd_pktid_audit(dhdp, map, pktid, test_for, func);
	if (ret == BCME_ERROR) {
		DHD_ERROR(("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n",
			__FUNCTION__, pktid, dhd_get_pktid_map_type(dhdp, map)));
		prhex(func, (uchar *)msg, msg_len);
		dhd_pktid_error_handler(dhdp);
	}

	return ret;
}

#define DHD_PKTID_AUDIT_RING_DEBUG(dhdp, map, pktid, test_for, msg, msg_len) \
	dhd_pktid_audit_ring_debug((dhdp), (dhd_pktid_map_t *)(map), \
		(pktid), (test_for), msg, msg_len, __FUNCTION__)

#endif /* DHD_PKTID_AUDIT_ENABLED */
/*
 * +---------------------------------------------------------------------------+
 * Packet to Packet Id mapper using a <numbered_key, locker> paradigm.
 *
 * dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_xxx_PKTID].
 *
 * dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique
 * packet id is returned. This unique packet id may be used to retrieve the
 * previously saved packet metadata, using dhd_pktid_map_free(). On invocation
 * of dhd_pktid_map_free(), the unique packet id is essentially freed. A
 * subsequent call to dhd_pktid_map_alloc() may reuse this packet id.
 *
 * Implementation Note:
 * Convert this into a <key,locker> abstraction and place into bcmutils !
 * Locker abstraction should treat contents as opaque storage, and a
 * callback should be registered to handle busy lockers on destructor.
 *
 * +---------------------------------------------------------------------------+
 */
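/*
 * Illustrative two-step sketch (hypothetical caller, not part of this file):
 * a pktid may be reserved first and populated later, which is what the
 * RSV/SAVE macro pair above wraps.
 *
 *   uint32 nkey = DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, PKTTYPE_DATA_TX);
 *   if (nkey != DHD_PKTID_INVALID) {
 *       // ... DMA-map the packet to obtain pa/dmah ...
 *       DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len,
 *               DMA_TX, dmah, secdma, PKTTYPE_DATA_TX);
 *   }
 */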
/** Allocate and initialize a mapper of num_items <numbered_key, locker> */

static dhd_pktid_map_handle_t *
dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items)
{
	void *osh = dhd->osh;
	uint32 nkey;
	dhd_pktid_map_t *map;
	uint32 dhd_pktid_map_sz;
	uint32 map_items;
	uint32 map_keys_sz;

	dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(num_items);

	map = (dhd_pktid_map_t *)VMALLOC(osh, dhd_pktid_map_sz);
	if (map == NULL) {
		DHD_ERROR(("%s:%d: MALLOC failed for size %d\n",
			__FUNCTION__, __LINE__, dhd_pktid_map_sz));
		return (dhd_pktid_map_handle_t *)NULL;
	}

	map->items = num_items;
	map->avail = num_items;

	map_items = DHD_PKIDMAP_ITEMS(map->items);

	map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);

	/* Initialize the lock that protects this structure */
	map->pktid_lock = DHD_PKTID_LOCK_INIT(osh);
	if (map->pktid_lock == NULL) {
		DHD_ERROR(("%s:%d: Lock init failed \r\n", __FUNCTION__, __LINE__));
		goto error;
	}

	map->keys = (dhd_pktid_key_t *)MALLOC(osh, map_keys_sz);
	if (map->keys == NULL) {
		DHD_ERROR(("%s:%d: MALLOC failed for map->keys size %d\n",
			__FUNCTION__, __LINE__, map_keys_sz));
		goto error;
	}

#if defined(DHD_PKTID_AUDIT_ENABLED)
	/* Incarnate a hierarchical multiword bitmap for auditing pktid allocator */
	map->pktid_audit = bcm_mwbmap_init(osh, map_items + 1);
	if (map->pktid_audit == (struct bcm_mwbmap *)NULL) {
		DHD_ERROR(("%s:%d: pktid_audit init failed\r\n", __FUNCTION__, __LINE__));
		goto error;
	} else {
		DHD_ERROR(("%s:%d: pktid_audit init succeeded %d\n",
			__FUNCTION__, __LINE__, map_items + 1));
	}
	map->pktid_audit_lock = DHD_PKTID_AUDIT_LOCK_INIT(osh);
#endif /* DHD_PKTID_AUDIT_ENABLED */

	for (nkey = 1; nkey <= map_items; nkey++) { /* locker #0 is reserved */
		map->keys[nkey] = nkey; /* populate with unique keys */
		map->lockers[nkey].state = LOCKER_IS_FREE;
		map->lockers[nkey].pkt = NULL; /* bzero: redundant */
		map->lockers[nkey].len = 0;
	}

	/* Reserve pktid #0, i.e. DHD_PKTID_INVALID to be inuse */
	map->lockers[DHD_PKTID_INVALID].state = LOCKER_IS_BUSY; /* tag locker #0 as inuse */
	map->lockers[DHD_PKTID_INVALID].pkt = NULL; /* bzero: redundant */
	map->lockers[DHD_PKTID_INVALID].len = 0;

#if defined(DHD_PKTID_AUDIT_ENABLED)
	/* do not use dhd_pktid_audit() here, use bcm_mwbmap_force directly */
	bcm_mwbmap_force(map->pktid_audit, DHD_PKTID_INVALID);
#endif /* DHD_PKTID_AUDIT_ENABLED */

	return (dhd_pktid_map_handle_t *)map; /* opaque handle */

error:
#if defined(DHD_PKTID_AUDIT_ENABLED)
	if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
		bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
		map->pktid_audit = (struct bcm_mwbmap *)NULL;
		if (map->pktid_audit_lock)
			DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
	}
#endif /* DHD_PKTID_AUDIT_ENABLED */

	if (map->keys) {
		MFREE(osh, map->keys, map_keys_sz);
	}

	if (map->pktid_lock) {
		DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
	}

	VMFREE(osh, map, dhd_pktid_map_sz);

	return (dhd_pktid_map_handle_t *)NULL;
}
/**
 * Retrieve all allocated keys and free all <numbered_key, locker>.
 * Freeing implies: unmapping the buffers and freeing the native packet.
 * This could have been a callback registered with the pktid mapper.
 */
static void
dhd_pktid_map_reset(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
{
	void *osh = dhd->osh;
	uint32 nkey;
	dhd_pktid_map_t *map;
	dhd_pktid_item_t *locker;
	uint32 map_items;
	unsigned long flags;
	bool data_tx = FALSE;

	map = (dhd_pktid_map_t *)handle;
	DHD_PKTID_LOCK(map->pktid_lock, flags);

	map_items = DHD_PKIDMAP_ITEMS(map->items);
	/* skip reserved KEY #0, and start from 1 */

	for (nkey = 1; nkey <= map_items; nkey++) {
		if (map->lockers[nkey].state == LOCKER_IS_BUSY) {
			locker = &map->lockers[nkey];
			locker->state = LOCKER_IS_FREE;
			data_tx = (locker->pkttype == PKTTYPE_DATA_TX);
			if (data_tx) {
				OSL_ATOMIC_DEC(dhd->osh, &dhd->prot->active_tx_count);
			}

#ifdef DHD_PKTID_AUDIT_RING
			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
#endif /* DHD_PKTID_AUDIT_RING */
#ifdef DHD_MAP_PKTID_LOGGING
			DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap,
				locker->pa, nkey, locker->len,
				locker->pkttype);
#endif /* DHD_MAP_PKTID_LOGGING */

			if (SECURE_DMA_ENAB(dhd->osh))
				SECURE_DMA_UNMAP(osh, locker->pa,
					locker->len, locker->dir, 0,
					locker->dmah, locker->secdma, 0);
			else
				DMA_UNMAP(osh, locker->pa, locker->len,
					locker->dir, 0, locker->dmah);

			dhd_prot_packet_free(dhd, (ulong *)locker->pkt,
				locker->pkttype, data_tx);
		}
		else {
#ifdef DHD_PKTID_AUDIT_RING
			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
#endif /* DHD_PKTID_AUDIT_RING */
		}
		map->keys[nkey] = nkey; /* populate with unique keys */
	}

	map->avail = map_items;
	memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
}
#ifdef IOCTLRESP_USE_CONSTMEM
/** Called in detach scenario. Releasing IOCTL buffers. */
static void
dhd_pktid_map_reset_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
{
	uint32 nkey;
	dhd_pktid_map_t *map;
	dhd_pktid_item_t *locker;
	uint32 map_items;
	unsigned long flags;

	map = (dhd_pktid_map_t *)handle;
	DHD_PKTID_LOCK(map->pktid_lock, flags);

	map_items = DHD_PKIDMAP_ITEMS(map->items);
	/* skip reserved KEY #0, and start from 1 */
	for (nkey = 1; nkey <= map_items; nkey++) {
		if (map->lockers[nkey].state == LOCKER_IS_BUSY) {
			dhd_dma_buf_t retbuf;

#ifdef DHD_PKTID_AUDIT_RING
			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
#endif /* DHD_PKTID_AUDIT_RING */

			locker = &map->lockers[nkey];
			retbuf.va = locker->pkt;
			retbuf.len = locker->len;
			retbuf.pa = locker->pa;
			retbuf.dmah = locker->dmah;
			retbuf.secdma = locker->secdma;

			free_ioctl_return_buffer(dhd, &retbuf);
		}
		else {
#ifdef DHD_PKTID_AUDIT_RING
			DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
#endif /* DHD_PKTID_AUDIT_RING */
		}
		map->keys[nkey] = nkey; /* populate with unique keys */
	}

	map->avail = map_items;
	memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
}
#endif /* IOCTLRESP_USE_CONSTMEM */
/**
 * Free the pktid map.
 */
static void
dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
{
	dhd_pktid_map_t *map;
	uint32 dhd_pktid_map_sz;
	uint32 map_keys_sz;

	if (handle == NULL)
		return;

	/* Free any pending packets */
	dhd_pktid_map_reset(dhd, handle);

	map = (dhd_pktid_map_t *)handle;
	dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
	map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);

	DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock);

#if defined(DHD_PKTID_AUDIT_ENABLED)
	if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
		bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
		map->pktid_audit = (struct bcm_mwbmap *)NULL;
		if (map->pktid_audit_lock) {
			DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock);
		}
	}
#endif /* DHD_PKTID_AUDIT_ENABLED */
	MFREE(dhd->osh, map->keys, map_keys_sz);
	VMFREE(dhd->osh, handle, dhd_pktid_map_sz);
}
#ifdef IOCTLRESP_USE_CONSTMEM
static void
dhd_pktid_map_fini_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
{
	dhd_pktid_map_t *map;
	uint32 dhd_pktid_map_sz;
	uint32 map_keys_sz;

	if (handle == NULL)
		return;

	/* Free any pending packets */
	dhd_pktid_map_reset_ioctl(dhd, handle);

	map = (dhd_pktid_map_t *)handle;
	dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
	map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);

	DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock);

#if defined(DHD_PKTID_AUDIT_ENABLED)
	if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
		bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
		map->pktid_audit = (struct bcm_mwbmap *)NULL;
		if (map->pktid_audit_lock) {
			DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock);
		}
	}
#endif /* DHD_PKTID_AUDIT_ENABLED */

	MFREE(dhd->osh, map->keys, map_keys_sz);
	VMFREE(dhd->osh, handle, dhd_pktid_map_sz);
}
#endif /* IOCTLRESP_USE_CONSTMEM */
/** Get the pktid free count */
static INLINE uint32 BCMFASTPATH
dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle)
{
	dhd_pktid_map_t *map;
	uint32 avail;
	unsigned long flags;

	ASSERT(handle != NULL);
	map = (dhd_pktid_map_t *)handle;

	DHD_PKTID_LOCK(map->pktid_lock, flags);
	avail = map->avail;
	DHD_PKTID_UNLOCK(map->pktid_lock, flags);

	return avail;
}
/**
 * dhd_pktid_map_reserve - reserve a unique numbered key. Reserved locker is not
 * yet populated. Invoke the pktid save api to populate the packet parameters
 * into the locker. This function is not reentrant, and is the caller's
 * responsibility. Caller must treat a returned value DHD_PKTID_INVALID as
 * a failure case, implying a depleted pool of pktids.
 */
static INLINE uint32
dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
	void *pkt, dhd_pkttype_t pkttype)
{
	uint32 nkey;
	dhd_pktid_map_t *map;
	dhd_pktid_item_t *locker;
	unsigned long flags;

	ASSERT(handle != NULL);
	map = (dhd_pktid_map_t *)handle;

	DHD_PKTID_LOCK(map->pktid_lock, flags);

	if ((int)(map->avail) <= 0) { /* no more pktids to allocate */
		map->failures++;
		DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__));
		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
		return DHD_PKTID_INVALID; /* failed alloc request */
	}

	ASSERT(map->avail <= map->items);
	nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */

	if ((map->avail > map->items) || (nkey > map->items)) {
		map->failures++;
		DHD_ERROR(("%s:%d: failed to allocate a new pktid,"
			" map->avail<%u>, nkey<%u>, pkttype<%u>\n",
			__FUNCTION__, __LINE__, map->avail, nkey,
			pkttype));
		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
		return DHD_PKTID_INVALID; /* failed alloc request */
	}

	locker = &map->lockers[nkey]; /* save packet metadata in locker */
	map->avail--;
	locker->pkt = pkt; /* pkt is saved, other params not yet saved. */
	locker->len = 0;
	locker->state = LOCKER_IS_BUSY; /* reserve this locker */

	DHD_PKTID_UNLOCK(map->pktid_lock, flags);

	ASSERT(nkey != DHD_PKTID_INVALID);

	return nkey; /* return locker's numbered key */
}
/*
 * dhd_pktid_map_save - Save a packet's parameters into a locker
 * corresponding to a previously reserved unique numbered key.
 */
static INLINE void
dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
	uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
	dhd_pkttype_t pkttype)
{
	dhd_pktid_map_t *map;
	dhd_pktid_item_t *locker;
	unsigned long flags;

	ASSERT(handle != NULL);
	map = (dhd_pktid_map_t *)handle;

	DHD_PKTID_LOCK(map->pktid_lock, flags);

	if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
		DHD_ERROR(("%s:%d: Error! saving invalid pktid<%u> pkttype<%u>\n",
			__FUNCTION__, __LINE__, nkey, pkttype));
		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
#ifdef DHD_FW_COREDUMP
		if (dhd->memdump_enabled) {
			/* collect core dump */
			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
			dhd_bus_mem_dump(dhd);
		}
#endif /* DHD_FW_COREDUMP */
		return;
	}

	locker = &map->lockers[nkey];

	ASSERT(((locker->state == LOCKER_IS_BUSY) && (locker->pkt == pkt)) ||
		((locker->state == LOCKER_IS_RSVD) && (locker->pkt == NULL)));

	/* store contents in locker */
	locker->dir = dir;
	locker->pa = pa;
	locker->len = (uint16)len; /* 16bit len */
	locker->dmah = dmah; /* OS specific DMA map handle */
	locker->secdma = secdma;
	locker->pkttype = pkttype;
	locker->pkt = pkt;
	locker->state = LOCKER_IS_BUSY; /* make this locker busy */
#ifdef DHD_MAP_PKTID_LOGGING
	DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_map, pa, nkey, len, pkttype);
#endif /* DHD_MAP_PKTID_LOGGING */
	DHD_PKTID_UNLOCK(map->pktid_lock, flags);
}
/**
 * dhd_pktid_map_alloc - Allocate a unique numbered key and save the packet
 * contents into the corresponding locker. Return the numbered key.
 */
static uint32 BCMFASTPATH
dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
	dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
	dhd_pkttype_t pkttype)
{
	uint32 nkey;

	nkey = dhd_pktid_map_reserve(dhd, handle, pkt, pkttype);
	if (nkey != DHD_PKTID_INVALID) {
		dhd_pktid_map_save(dhd, handle, pkt, nkey, pa,
			len, dir, dmah, secdma, pkttype);
	}

	return nkey;
}
/**
 * dhd_pktid_map_free - Given a numbered key, return the locker contents.
 * dhd_pktid_map_free() is not reentrant, and is the caller's responsibility.
 * Caller may not free a pktid value DHD_PKTID_INVALID or an arbitrary pktid
 * value. Only a previously allocated pktid may be freed.
 */
static void * BCMFASTPATH
dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, uint32 nkey,
	dmaaddr_t *pa, uint32 *len, void **dmah, void **secdma, dhd_pkttype_t pkttype,
	bool rsv_locker)
{
	dhd_pktid_map_t *map;
	dhd_pktid_item_t *locker;
	void *pkt;
	unsigned long long locker_addr;
	unsigned long flags;

	ASSERT(handle != NULL);

	map = (dhd_pktid_map_t *)handle;

	DHD_PKTID_LOCK(map->pktid_lock, flags);

	if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
		DHD_ERROR(("%s:%d: Error! Try to free invalid pktid<%u>, pkttype<%d>\n",
			__FUNCTION__, __LINE__, nkey, pkttype));
		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
#ifdef DHD_FW_COREDUMP
		if (dhd->memdump_enabled) {
			/* collect core dump */
			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
			dhd_bus_mem_dump(dhd);
		}
#endif /* DHD_FW_COREDUMP */
		return NULL;
	}

	locker = &map->lockers[nkey];

#if defined(DHD_PKTID_AUDIT_MAP)
	DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* Audit duplicate FREE */
#endif /* DHD_PKTID_AUDIT_MAP */

	/* Debug check for cloned numbered key */
	if (locker->state == LOCKER_IS_FREE) {
		DHD_ERROR(("%s:%d: Error! freeing already freed invalid pktid<%u>\n",
			__FUNCTION__, __LINE__, nkey));
		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
#ifdef DHD_FW_COREDUMP
		if (dhd->memdump_enabled) {
			/* collect core dump */
			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
			dhd_bus_mem_dump(dhd);
		}
#endif /* DHD_FW_COREDUMP */
		return NULL;
	}

	/* Check for the colour of the buffer i.e. the buffer posted for TX
	 * should be freed for TX completion. Similarly the buffer posted for
	 * IOCTL should be freed for IOCTL completion, etc.
	 */
	if ((pkttype != PKTTYPE_NO_CHECK) && (locker->pkttype != pkttype)) {
		DHD_ERROR(("%s:%d: Error! Invalid Buffer Free for pktid<%u> \n",
			__FUNCTION__, __LINE__, nkey));
#ifdef BCMDMA64OSL
		PHYSADDRTOULONG(locker->pa, locker_addr);
#else
		locker_addr = PHYSADDRLO(locker->pa);
#endif /* BCMDMA64OSL */
		DHD_ERROR(("%s:%d: locker->state <%d>, locker->pkttype <%d>,"
			"pkttype <%d> locker->pa <0x%llx> \n",
			__FUNCTION__, __LINE__, locker->state, locker->pkttype,
			pkttype, locker_addr));
		DHD_PKTID_UNLOCK(map->pktid_lock, flags);
#ifdef DHD_FW_COREDUMP
		if (dhd->memdump_enabled) {
			/* collect core dump */
			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
			dhd_bus_mem_dump(dhd);
		}
#endif /* DHD_FW_COREDUMP */
		return NULL;
	}

	if (rsv_locker == DHD_PKTID_FREE_LOCKER) {
		map->avail++;
		map->keys[map->avail] = nkey; /* make this numbered key available */
		locker->state = LOCKER_IS_FREE; /* open and free Locker */
	} else {
		/* pktid will be reused, but the locker does not have a valid pkt */
		locker->state = LOCKER_IS_RSVD;
	}

#if defined(DHD_PKTID_AUDIT_MAP)
	DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
#endif /* DHD_PKTID_AUDIT_MAP */
#ifdef DHD_MAP_PKTID_LOGGING
	DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap, locker->pa, nkey,
		(uint32)locker->len, pkttype);
#endif /* DHD_MAP_PKTID_LOGGING */

	*pa = locker->pa; /* return contents of locker */
	*len = (uint32)locker->len;
	*dmah = locker->dmah;
	*secdma = locker->secdma;

	pkt = locker->pkt;
	locker->pkt = NULL; /* Clear pkt */
	locker->len = 0;

	DHD_PKTID_UNLOCK(map->pktid_lock, flags);

	return pkt;
}
#else /* ! DHD_PCIE_PKTID */

typedef struct pktlists {
	PKT_LIST *tx_pkt_list;		/* list for tx packets */
	PKT_LIST *rx_pkt_list;		/* list for rx packets */
	PKT_LIST *ctrl_pkt_list;	/* list for ioctl/event buf post */
} pktlists_t;

/*
 * Given that each workitem only uses a 32bit pktid, only 32bit hosts may avail
 * of a one to one mapping of a 32bit pktptr and a 32bit pktid.
 *
 * - When PKTIDMAP is not used, DHD_NATIVE_TO_PKTID variants will never fail.
 * - Neither DHD_NATIVE_TO_PKTID nor DHD_PKTID_TO_NATIVE need to be protected by
 *   a lock.
 * - Hence DHD_PKTID_INVALID is not defined when DHD_PCIE_PKTID is undefined.
 */
#define DHD_PKTID32(pktptr32)	((uint32)(pktptr32))
#define DHD_PKTPTR32(pktid32)	((void *)(pktid32))
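/*
 * Illustrative sketch (hypothetical, 32bit hosts only): the pktid is just the
 * packet pointer reinterpreted, so the round trip is the identity:
 *
 *   void *pkt = ...;                       // native packet pointer
 *   uint32 pktid = DHD_PKTID32(pkt);       // pointer -> 32bit pktid
 *   ASSERT(DHD_PKTPTR32(pktid) == pkt);    // pktid -> pointer round trip
 */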
static INLINE uint32 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
	dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
	dhd_pkttype_t pkttype);
static INLINE void *dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
	dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
	dhd_pkttype_t pkttype);
static dhd_pktid_map_handle_t *
dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items)
{
	osl_t *osh = dhd->osh;
	pktlists_t *handle = NULL;

	if ((handle = (pktlists_t *) MALLOCZ(osh, sizeof(pktlists_t))) == NULL) {
		DHD_ERROR(("%s:%d: MALLOC failed for lists allocation, size=%d\n",
			__FUNCTION__, __LINE__, sizeof(pktlists_t)));
		goto error_done;
	}

	if ((handle->tx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
		DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
			__FUNCTION__, __LINE__, sizeof(PKT_LIST)));
		goto error;
	}

	if ((handle->rx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
		DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
			__FUNCTION__, __LINE__, sizeof(PKT_LIST)));
		goto error;
	}

	if ((handle->ctrl_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
		DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
			__FUNCTION__, __LINE__, sizeof(PKT_LIST)));
		goto error;
	}

	PKTLIST_INIT(handle->tx_pkt_list);
	PKTLIST_INIT(handle->rx_pkt_list);
	PKTLIST_INIT(handle->ctrl_pkt_list);

	return (dhd_pktid_map_handle_t *) handle;

error:
	if (handle->ctrl_pkt_list) {
		MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
	}

	if (handle->rx_pkt_list) {
		MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
	}

	if (handle->tx_pkt_list) {
		MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
	}

	MFREE(osh, handle, sizeof(pktlists_t));

error_done:
	return (dhd_pktid_map_handle_t *)NULL;
}
static void
dhd_pktid_map_reset(dhd_pub_t *dhd, pktlists_t *handle)
{
	osl_t *osh = dhd->osh;

	if (handle->ctrl_pkt_list) {
		PKTLIST_FINI(handle->ctrl_pkt_list);
		MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
	}

	if (handle->rx_pkt_list) {
		PKTLIST_FINI(handle->rx_pkt_list);
		MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
	}

	if (handle->tx_pkt_list) {
		PKTLIST_FINI(handle->tx_pkt_list);
		MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
	}
}
static void
dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map)
{
	osl_t *osh = dhd->osh;
	pktlists_t *handle = (pktlists_t *) map;

	ASSERT(handle != NULL);
	if (handle == (pktlists_t *)NULL) {
		return;
	}

	dhd_pktid_map_reset(dhd, handle);

	MFREE(osh, handle, sizeof(pktlists_t));
}
/** Save dma parameters into the packet's pkttag and convert a pktptr to pktid */
static INLINE uint32
dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
	dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
	dhd_pkttype_t pkttype)
{
	pktlists_t *handle = (pktlists_t *) map;
	ASSERT(pktptr32 != NULL);
	DHD_PKT_SET_DMA_LEN(pktptr32, dma_len);
	DHD_PKT_SET_DMAH(pktptr32, dmah);
	DHD_PKT_SET_PA(pktptr32, pa);
	DHD_PKT_SET_SECDMA(pktptr32, secdma);

	if (pkttype == PKTTYPE_DATA_TX) {
		PKTLIST_ENQ(handle->tx_pkt_list, pktptr32);
	} else if (pkttype == PKTTYPE_DATA_RX) {
		PKTLIST_ENQ(handle->rx_pkt_list, pktptr32);
	} else {
		PKTLIST_ENQ(handle->ctrl_pkt_list, pktptr32);
	}

	return DHD_PKTID32(pktptr32);
}
/** Convert a pktid to pktptr and retrieve saved dma parameters from packet */
static INLINE void *
dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
	dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
	dhd_pkttype_t pkttype)
{
	pktlists_t *handle = (pktlists_t *) map;
	void *pktptr32;

	ASSERT(pktid32 != 0U);
	pktptr32 = DHD_PKTPTR32(pktid32);
	*dma_len = DHD_PKT_GET_DMA_LEN(pktptr32);
	*dmah = DHD_PKT_GET_DMAH(pktptr32);
	*pa = DHD_PKT_GET_PA(pktptr32);
	*secdma = DHD_PKT_GET_SECDMA(pktptr32);

	if (pkttype == PKTTYPE_DATA_TX) {
		PKTLIST_UNLINK(handle->tx_pkt_list, pktptr32);
	} else if (pkttype == PKTTYPE_DATA_RX) {
		PKTLIST_UNLINK(handle->rx_pkt_list, pktptr32);
	} else {
		PKTLIST_UNLINK(handle->ctrl_pkt_list, pktptr32);
	}

	return pktptr32;
}
#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype)	DHD_PKTID32(pkt)

#define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dma_dir, dmah, secdma, pkttype) \
	({ BCM_REFERENCE(dhd); BCM_REFERENCE(nkey); BCM_REFERENCE(dma_dir); \
	   dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
			(dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
	})

#define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dma_dir, dmah, secdma, pkttype) \
	({ BCM_REFERENCE(dhd); BCM_REFERENCE(dma_dir); \
	   dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
			(dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
	})

#define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
	({ BCM_REFERENCE(dhd); BCM_REFERENCE(pkttype); \
	   dhd_pktid_to_native((dhd_pktid_map_handle_t *) map, (uint32)(pktid), \
			(dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
			(void **)&secdma, (dhd_pkttype_t)(pkttype)); \
	})

#define DHD_PKTID_AVAIL(map)	(~0)

#endif /* ! DHD_PCIE_PKTID */

/* +------------------ End of PCIE DHD PKTID MAPPER -----------------------+ */
/**
 * The PCIE FD protocol layer is constructed in two phases:
 * Phase 1. dhd_prot_attach()
 * Phase 2. dhd_prot_init()
 *
 * dhd_prot_attach() - Allocates a dhd_prot_t object and resets all its fields.
 * All common rings are also attached (msgbuf_ring_t objects are allocated
 * with DMA-able buffers).
 * All dhd_dma_buf_t objects are also allocated here.
 *
 * As dhd_prot_attach() is invoked before the pcie_shared object is read, any
 * initialization of objects that requires information advertized by the dongle
 * may not be performed here.
 * E.g. the number of TxPost flowrings is not known at this point, nor do we
 * know which form of D2H DMA sync mechanism is advertized by the dongle, or
 * whether the dongle supports DMA-ing of WR/RD indices for the H2D and/or D2H
 * rings (common + flow).
 *
 * dhd_prot_init() is invoked after the bus layer has fetched the information
 * advertized by the dongle in the pcie_shared_t.
 */
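/*
 * Illustrative call-order sketch (hypothetical bus-layer caller, not part of
 * this file): attach first, then read pcie_shared_t, then init.
 *
 *   if (dhd_prot_attach(dhd) != BCME_OK)    // phase 1: allocate objects
 *       goto fail;
 *   // ... bus layer reads pcie_shared_t from dongle memory ...
 *   if (dhd_prot_init(dhd) != BCME_OK)      // phase 2: apply dongle features
 *       goto fail;
 */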
int
dhd_prot_attach(dhd_pub_t *dhd)
{
	osl_t *osh = dhd->osh;
	dhd_prot_t *prot;

	/* FW going to DMA extended trap data,
	 * allocate buffer for the maximum extended trap data.
	 */
#ifdef D2H_MINIDUMP
	uint32 trap_buf_len = BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN_MIN;
#else
	uint32 trap_buf_len = BCMPCIE_EXT_TRAP_DATA_MAXLEN;
#endif /* D2H_MINIDUMP */

	/* Allocate prot structure */
	if (!(prot = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT,
		sizeof(dhd_prot_t)))) {
		DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
		goto fail;
	}
	memset(prot, 0, sizeof(*prot));

	prot->osh = osh;
	dhd->prot = prot;

	/* DMAing ring completes supported? FALSE by default */
	dhd->dma_d2h_ring_upd_support = FALSE;
	dhd->dma_h2d_ring_upd_support = FALSE;
	dhd->dma_ring_upd_overwrite = FALSE;

	dhd->idma_inited = 0;
	dhd->ifrm_inited = 0;
	dhd->dar_inited = 0;
	/* Common Ring Allocations */

	/* Ring  0: H2D Control Submission */
	if (dhd_prot_ring_attach(dhd, &prot->h2dring_ctrl_subn, "h2dctrl",
		H2DRING_CTRL_SUB_MAX_ITEM, H2DRING_CTRL_SUB_ITEMSIZE,
		BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT) != BCME_OK) {
		DHD_ERROR(("%s: dhd_prot_ring_attach H2D Ctrl Submission failed\n",
			__FUNCTION__));
		goto fail;
	}

	/* Ring  1: H2D Receive Buffer Post */
	if (dhd_prot_ring_attach(dhd, &prot->h2dring_rxp_subn, "h2drxp",
		H2DRING_RXPOST_MAX_ITEM, H2DRING_RXPOST_ITEMSIZE,
		BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT) != BCME_OK) {
		DHD_ERROR(("%s: dhd_prot_ring_attach H2D RxPost failed\n",
			__FUNCTION__));
		goto fail;
	}

	/* Ring  2: D2H Control Completion */
	if (dhd_prot_ring_attach(dhd, &prot->d2hring_ctrl_cpln, "d2hctrl",
		D2HRING_CTRL_CMPLT_MAX_ITEM, D2HRING_CTRL_CMPLT_ITEMSIZE,
		BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE) != BCME_OK) {
		DHD_ERROR(("%s: dhd_prot_ring_attach D2H Ctrl Completion failed\n",
			__FUNCTION__));
		goto fail;
	}

	/* Ring  3: D2H Transmit Complete */
	if (dhd_prot_ring_attach(dhd, &prot->d2hring_tx_cpln, "d2htxcpl",
		D2HRING_TXCMPLT_MAX_ITEM, D2HRING_TXCMPLT_ITEMSIZE,
		BCMPCIE_D2H_MSGRING_TX_COMPLETE) != BCME_OK) {
		DHD_ERROR(("%s: dhd_prot_ring_attach D2H Tx Completion failed\n",
			__FUNCTION__));
		goto fail;
	}

	/* Ring  4: D2H Receive Complete */
	if (dhd_prot_ring_attach(dhd, &prot->d2hring_rx_cpln, "d2hrxcpl",
		D2HRING_RXCMPLT_MAX_ITEM, D2HRING_RXCMPLT_ITEMSIZE,
		BCMPCIE_D2H_MSGRING_RX_COMPLETE) != BCME_OK) {
		DHD_ERROR(("%s: dhd_prot_ring_attach D2H Rx Completion failed\n",
			__FUNCTION__));
		goto fail;
	}
	/*
	 * Max number of flowrings is not yet known. msgbuf_ring_t with DMA-able
	 * buffers for flowrings will be instantiated in dhd_prot_init().
	 * See dhd_prot_flowrings_pool_attach()
	 */
	/* ioctl response buffer */
	if (dhd_dma_buf_alloc(dhd, &prot->retbuf, IOCT_RETBUF_SIZE)) {
		goto fail;
	}

	/* IOCTL request buffer */
	if (dhd_dma_buf_alloc(dhd, &prot->ioctbuf, IOCT_RETBUF_SIZE)) {
		goto fail;
	}

	/* Host TS request buffer one buffer for now */
	if (dhd_dma_buf_alloc(dhd, &prot->hostts_req_buf, CTRLSUB_HOSTTS_MEESAGE_SIZE)) {
		goto fail;
	}
	prot->hostts_req_buf_inuse = FALSE;

	/* Scratch buffer for dma rx offset */
#ifdef BCM_HOST_BUF
	if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf,
		ROUNDUP(DMA_D2H_SCRATCH_BUF_LEN, 16) + DMA_HOST_BUFFER_LEN)) {
#else
	if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, DMA_D2H_SCRATCH_BUF_LEN)) {
#endif /* BCM_HOST_BUF */
		goto fail;
	}

	/* scratch buffer bus throughput measurement */
	if (dhd_dma_buf_alloc(dhd, &prot->host_bus_throughput_buf, DHD_BUS_TPUT_BUF_LEN)) {
		goto fail;
	}

#ifdef DHD_RX_CHAINING
	dhd_rxchain_reset(&prot->rxchain);
#endif /* DHD_RX_CHAINING */

	prot->pktid_ctrl_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_CTRL_PKTID);
	if (prot->pktid_ctrl_map == NULL) {
		goto fail;
	}

	prot->pktid_rx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_RX_PKTID);
	if (prot->pktid_rx_map == NULL)
		goto fail;

	prot->pktid_tx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_TX_PKTID);
	if (prot->pktid_tx_map == NULL)
		goto fail;

#ifdef IOCTLRESP_USE_CONSTMEM
	prot->pktid_map_handle_ioctl = DHD_NATIVE_TO_PKTID_INIT(dhd,
		DHD_FLOWRING_MAX_IOCTLRESPBUF_POST);
	if (prot->pktid_map_handle_ioctl == NULL) {
		goto fail;
	}
#endif /* IOCTLRESP_USE_CONSTMEM */

#ifdef DHD_MAP_PKTID_LOGGING
	prot->pktid_dma_map = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
	if (prot->pktid_dma_map == NULL) {
		DHD_ERROR(("%s: failed to allocate pktid_dma_map\n",
			__FUNCTION__));
	}

	prot->pktid_dma_unmap = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
	if (prot->pktid_dma_unmap == NULL) {
		DHD_ERROR(("%s: failed to allocate pktid_dma_unmap\n",
			__FUNCTION__));
	}
#endif /* DHD_MAP_PKTID_LOGGING */
	/* Initialize the work queues to be used by the Load Balancing logic */
#if defined(DHD_LB_TXC)
	{
		void *buffer;
		buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ);
		if (buffer == NULL) {
			DHD_ERROR(("%s: failed to allocate TXC work buffer\n", __FUNCTION__));
			goto fail;
		}
		bcm_workq_init(&prot->tx_compl_prod, &prot->tx_compl_cons,
			buffer, DHD_LB_WORKQ_SZ);
		prot->tx_compl_prod_sync = 0;
		DHD_INFO(("%s: created tx_compl_workq <%p,%d>\n",
			__FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
	}
#endif /* DHD_LB_TXC */

#if defined(DHD_LB_RXC)
	{
		void *buffer;
		buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ);
		if (buffer == NULL) {
			DHD_ERROR(("%s: failed to allocate RXC work buffer\n", __FUNCTION__));
			goto fail;
		}
		bcm_workq_init(&prot->rx_compl_prod, &prot->rx_compl_cons,
			buffer, DHD_LB_WORKQ_SZ);
		prot->rx_compl_prod_sync = 0;
		DHD_INFO(("%s: created rx_compl_workq <%p,%d>\n",
			__FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
	}
#endif /* DHD_LB_RXC */

	/* Initialize trap buffer */
	if (dhd_dma_buf_alloc(dhd, &dhd->prot->fw_trap_buf, trap_buf_len)) {
		DHD_ERROR(("%s: dhd_init_trap_buffer failed\n", __FUNCTION__));
		goto fail;
	}

	return BCME_OK;

fail:
	if (prot != NULL) {
		/* Free up all allocated memories */
		dhd_prot_detach(dhd);
	}

	return BCME_NOMEM;
} /* dhd_prot_attach */
static void
dhd_set_host_cap(dhd_pub_t *dhd)
{
	uint32 data = 0;
	dhd_prot_t *prot = dhd->prot;
#ifdef D2H_MINIDUMP
	uint16 host_trap_addr_len;
#endif /* D2H_MINIDUMP */

	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
		if (dhd->h2d_phase_supported) {
			data |= HOSTCAP_H2D_VALID_PHASE;
			if (dhd->force_dongletrap_on_bad_h2d_phase)
				data |= HOSTCAP_H2D_ENABLE_TRAP_ON_BADPHASE;
		}
		if (prot->host_ipc_version > prot->device_ipc_version)
			prot->active_ipc_version = prot->device_ipc_version;
		else
			prot->active_ipc_version = prot->host_ipc_version;

		data |= prot->active_ipc_version;

		if (dhdpcie_bus_get_pcie_hostready_supported(dhd->bus)) {
			DHD_INFO(("Advertise Hostready Capability\n"));
			data |= HOSTCAP_H2D_ENABLE_HOSTRDY;
		}

		/* Disable DS altogether */
		data |= HOSTCAP_DS_NO_OOB_DW;
		dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_NONE);

		/* Indicate support for extended trap data */
		data |= HOSTCAP_EXTENDED_TRAP_DATA;

		/* Indicate support for TX status metadata */
		if (dhd->pcie_txs_metadata_enable != 0)
			data |= HOSTCAP_TXSTATUS_METADATA;

		/* Enable fast delete ring in firmware if supported */
		if (dhd->fast_delete_ring_support) {
			data |= HOSTCAP_FAST_DELETE_RING;
		}

		if (dhdpcie_bus_get_pcie_idma_supported(dhd->bus)) {
			DHD_ERROR(("IDMA inited\n"));
			data |= HOSTCAP_H2D_IDMA;
			dhd->idma_inited = TRUE;
		}

		if (dhdpcie_bus_get_pcie_ifrm_supported(dhd->bus)) {
			DHD_ERROR(("IFRM Inited\n"));
			data |= HOSTCAP_H2D_IFRM;
			dhd->ifrm_inited = TRUE;
			dhd->dma_h2d_ring_upd_support = FALSE;
			dhd_prot_dma_indx_free(dhd);
		}

		if (dhdpcie_bus_get_pcie_dar_supported(dhd->bus)) {
			DHD_ERROR(("DAR doorbell Use\n"));
			data |= HOSTCAP_H2D_DAR;
			dhd->dar_inited = TRUE;
		}

		data |= HOSTCAP_UR_FW_NO_TRAP;

#ifdef D2H_MINIDUMP
		if (dhd_bus_is_minidump_enabled(dhd)) {
			data |= HOSTCAP_EXT_TRAP_DBGBUF;
			DHD_ERROR(("ALLOW D2H MINIDUMP!!\n"));
		}
#endif /* D2H_MINIDUMP */
	}

	DHD_INFO(("%s:Active Ver:%d, Host Ver:%d, FW Ver:%d\n",
		__FUNCTION__,
		prot->active_ipc_version, prot->host_ipc_version,
		prot->device_ipc_version));

	dhd_bus_cmn_writeshared(dhd->bus, &data, sizeof(uint32), HOST_API_VERSION, 0);
	dhd_bus_cmn_writeshared(dhd->bus, &prot->fw_trap_buf.pa,
		sizeof(prot->fw_trap_buf.pa), DNGL_TO_HOST_TRAP_ADDR, 0);
#ifdef D2H_MINIDUMP
	if (dhd_bus_is_minidump_enabled(dhd)) {
		/* Dongle expects the host_trap_addr_len in terms of words */
		host_trap_addr_len = BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN_MIN / 4;
		dhd_bus_cmn_writeshared(dhd->bus, &host_trap_addr_len,
			sizeof(host_trap_addr_len), DNGL_TO_HOST_TRAP_ADDR_LEN, 0);
	}
#endif /* D2H_MINIDUMP */
}
/**
 * dhd_prot_init - second stage of dhd_prot_attach. Now that the dongle has
 * completed its initialization of the pcie_shared structure, we may now fetch
 * the dongle advertized features and adjust the protocol layer accordingly.
 *
 * dhd_prot_init() may be invoked again after a dhd_prot_reset().
 */
int
dhd_prot_init(dhd_pub_t *dhd)
{
	sh_addr_t base_addr;
	dhd_prot_t *prot = dhd->prot;
	int ret = BCME_OK;
	uint32 idmacontrol;
	uint32 waitcount = 0;

#ifdef WL_MONITOR
	dhd->monitor_enable = FALSE;
#endif /* WL_MONITOR */

	/**
	 * A user defined value can be assigned to global variable h2d_max_txpost via
	 * 1. DHD IOVAR h2d_max_txpost, before firmware download
	 * 2. module parameter h2d_max_txpost
	 * prot->h2d_max_txpost is assigned with H2DRING_TXPOST_MAX_ITEM,
	 * if user has not defined any buffers by one of the above methods.
	 */
	prot->h2d_max_txpost = (uint16)h2d_max_txpost;

	DHD_ERROR(("%s:%d: h2d_max_txpost = %d\n", __FUNCTION__, __LINE__, prot->h2d_max_txpost));
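	/*
	 * Illustrative sketch (hypothetical module name and shell invocation,
	 * not part of this file): with the module-parameter path described
	 * above, the override could look like
	 *
	 *   insmod dhd.ko h2d_max_txpost=512
	 *
	 * issued before firmware download, so the user value is picked up here
	 * instead of H2DRING_TXPOST_MAX_ITEM.
	 */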
	/* Read max rx packets supported by dongle */
	dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0);
	if (prot->max_rxbufpost == 0) {
		/* This would happen if the dongle firmware is not */
		/* using the latest shared structure template */
		prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST;
	}
	DHD_ERROR(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost));

	/* Initialize. bzero() would blow away the dma pointers. */
	prot->max_eventbufpost = DHD_FLOWRING_MAX_EVENTBUF_POST;
	prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST;
	prot->max_infobufpost = DHD_H2D_INFORING_MAX_BUF_POST;
	prot->max_tsbufpost = DHD_MAX_TSBUF_POST;

	prot->cur_ioctlresp_bufs_posted = 0;
	OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count);
	prot->data_seq_no = 0;
	prot->ioctl_seq_no = 0;
	prot->rxbufpost = 0;
	prot->cur_event_bufs_posted = 0;
	prot->ioctl_state = 0;
	prot->curr_ioctl_cmd = 0;
	prot->cur_ts_bufs_posted = 0;
	prot->infobufpost = 0;

	prot->dmaxfer.srcmem.va = NULL;
	prot->dmaxfer.dstmem.va = NULL;
	prot->dmaxfer.in_progress = FALSE;

	prot->metadata_dbg = FALSE;
	prot->rx_metadata_offset = 0;
	prot->tx_metadata_offset = 0;
	prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT;

	/* To catch any rollover issues fast, starting with higher ioctl_trans_id */
	prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER;
	prot->ioctl_state = 0;
	prot->ioctl_status = 0;
	prot->ioctl_resplen = 0;
	prot->ioctl_received = IOCTL_WAIT;
	/* Initialize Common MsgBuf Rings */

	prot->device_ipc_version = dhd->bus->api.fw_rev;
	prot->host_ipc_version = PCIE_SHARED_VERSION;

	/* Init the host API version */
	dhd_set_host_cap(dhd);

	/* Register the interrupt function upfront */
	/* remove corerev checks in data path */
	/* do this after host/fw negotiation for DAR */
	prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus);
	prot->mb_2_ring_fn = dhd_bus_get_mbintr_2_fn(dhd->bus);

	dhd->bus->_dar_war = (dhd->bus->sih->buscorerev < 64) ? TRUE : FALSE;

	dhd_prot_ring_init(dhd, &prot->h2dring_ctrl_subn);
	dhd_prot_ring_init(dhd, &prot->h2dring_rxp_subn);
	dhd_prot_ring_init(dhd, &prot->d2hring_ctrl_cpln);

	/* Make it compatible with pre-rev7 firmware */
	if (prot->active_ipc_version < PCIE_SHARED_VERSION_7) {
		prot->d2hring_tx_cpln.item_len =
			D2HRING_TXCMPLT_ITEMSIZE_PREREV7;
		prot->d2hring_rx_cpln.item_len =
			D2HRING_RXCMPLT_ITEMSIZE_PREREV7;
	}
	dhd_prot_ring_init(dhd, &prot->d2hring_tx_cpln);
	dhd_prot_ring_init(dhd, &prot->d2hring_rx_cpln);

	dhd_prot_d2h_sync_init(dhd);

	dhd_prot_h2d_sync_init(dhd);

	/* init the scratch buffer */
	dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_scratch_buf.pa);
	dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
		D2H_DMA_SCRATCH_BUF, 0);
	dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf.len,
		sizeof(prot->d2h_dma_scratch_buf.len), D2H_DMA_SCRATCH_BUF_LEN, 0);

	/* If supported by the host, indicate the memory block
	 * for completion writes / submission reads to shared space
	 */
	if (dhd->dma_d2h_ring_upd_support) {
		dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_wr_buf.pa);
		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
			D2H_DMA_INDX_WR_BUF, 0);
		dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_rd_buf.pa);
		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
			H2D_DMA_INDX_RD_BUF, 0);
	}

	if (dhd->dma_h2d_ring_upd_support || IDMA_ENAB(dhd)) {
		dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_wr_buf.pa);
		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
			H2D_DMA_INDX_WR_BUF, 0);
		dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_rd_buf.pa);
		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
			D2H_DMA_INDX_RD_BUF, 0);
	}
	/* Signal to the dongle that common ring init is complete */
	dhd_bus_hostready(dhd->bus);

	/*
	 * If the DMA-able buffers for flowring need to come from a specific
	 * contiguous memory region, then setup prot->flowrings_dma_buf here.
	 * dhd_prot_flowrings_pool_attach() will carve out DMA-able buffers from
	 * this contiguous memory region, for each of the flowrings.
	 */

	/* Pre-allocate pool of msgbuf_ring for flowrings */
	if (dhd_prot_flowrings_pool_attach(dhd) != BCME_OK) {
		return BCME_ERROR;
	}

	/* If IFRM is enabled, wait for FW to setup the DMA channel */
	if (IFRM_ENAB(dhd)) {
		dhd_base_addr_htolpa(&base_addr, prot->h2d_ifrm_indx_wr_buf.pa);
		dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
			H2D_IFRM_INDX_WR_BUF, 0);
	}

	/* If IDMA is enabled and inited, wait for FW to setup the IDMA descriptors.
	 * Waiting just before configuring doorbell
	 */
#define IDMA_ENABLE_WAIT 10
	if (IDMA_ACTIVE(dhd)) {
		/* wait for idma_en bit in IDMAcontrol register to be set */
		/* Loop till idma_en is not set */
		uint buscorerev = dhd->bus->sih->buscorerev;
		idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
			IDMAControl(buscorerev), 0, 0);
		while (!(idmacontrol & PCIE_IDMA_MODE_EN(buscorerev)) &&
			(waitcount++ < IDMA_ENABLE_WAIT)) {

			DHD_ERROR(("iDMA not enabled yet,waiting 1 ms c=%d IDMAControl = %08x\n",
				waitcount, idmacontrol));
			OSL_DELAY(1000); /* 1ms as its onetime only */
			idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				IDMAControl(buscorerev), 0, 0);
		}

		if (waitcount < IDMA_ENABLE_WAIT) {
			DHD_ERROR(("iDMA enabled PCIEControl = %08x\n", idmacontrol));
		} else {
			DHD_ERROR(("Error: wait for iDMA timed out wait=%d IDMAControl = %08x\n",
				waitcount, idmacontrol));
			return BCME_ERROR;
		}
	}

	/* Host should configure soft doorbells if needed ... here */

	/* Post to dongle host configured soft doorbells */
	dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd);

	dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
	dhd_msgbuf_rxbuf_post_event_bufs(dhd);

	prot->no_retry = FALSE;
	prot->no_aggr = FALSE;
	prot->fixed_rate = FALSE;

	/*
	 * Note that any communication with the Dongle should be added
	 * below this point. Any other host data structure initialization that
	 * needs to be done before the DPC starts executing should be done
	 * above this point.
	 * Because once we start sending H2D requests to the Dongle, the Dongle
	 * responds immediately. So the DPC context to handle this
	 * D2H response could preempt the context in which dhd_prot_init is running.
	 * We want to ensure that all the Host part of dhd_prot_init is
	 * done before that.
	 */

	/* See if info rings could be created */
	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
		if ((ret = dhd_prot_init_info_rings(dhd)) != BCME_OK) {
			/* For now log and proceed, further clean up action maybe necessary
			 * when we have more clarity.
			 */
			DHD_ERROR(("%s Info rings couldn't be created: Err Code%d",
				__FUNCTION__, ret));
		}
	}

	return BCME_OK;
} /* dhd_prot_init */
/**
 * dhd_prot_detach - PCIE FD protocol layer destructor.
 * Unlink, frees allocated protocol memory (including dhd_prot)
 */
void dhd_prot_detach(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;

	/* Stop the protocol module */
	if (prot) {
		/* free up all DMA-able buffers allocated during prot attach/init */
		dhd_dma_buf_free(dhd, &prot->d2h_dma_scratch_buf);
		dhd_dma_buf_free(dhd, &prot->retbuf);
		dhd_dma_buf_free(dhd, &prot->ioctbuf);
		dhd_dma_buf_free(dhd, &prot->host_bus_throughput_buf);
		dhd_dma_buf_free(dhd, &prot->hostts_req_buf);
		dhd_dma_buf_free(dhd, &prot->fw_trap_buf);

		/* DMA-able buffers for DMAing H2D/D2H WR/RD indices */
		dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
		dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_rd_buf);
		dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_wr_buf);
		dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);

		dhd_dma_buf_free(dhd, &prot->h2d_ifrm_indx_wr_buf);

		/* Common MsgBuf Rings */
		dhd_prot_ring_detach(dhd, &prot->h2dring_ctrl_subn);
		dhd_prot_ring_detach(dhd, &prot->h2dring_rxp_subn);
		dhd_prot_ring_detach(dhd, &prot->d2hring_ctrl_cpln);
		dhd_prot_ring_detach(dhd, &prot->d2hring_tx_cpln);
		dhd_prot_ring_detach(dhd, &prot->d2hring_rx_cpln);

		/* Detach each DMA-able buffer and free the pool of msgbuf_ring_t */
		dhd_prot_flowrings_pool_detach(dhd);

		/* detach info rings */
		dhd_prot_detach_info_rings(dhd);

		/* if IOCTLRESP_USE_CONSTMEM is defined IOCTL PKTs use pktid_map_handle_ioctl
		 * handler and PKT memory is allocated using alloc_ioctl_return_buffer(). Otherwise
		 * they will be part of pktid_ctrl_map handler and PKT memory is allocated using
		 * PKTGET_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) OR PKTGET.
		 * Similarly for freeing PKT buffers DHD_NATIVE_TO_PKTID_FINI will be used
		 * which calls PKTFREE_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) OR PKTFREE.
		 * Else if IOCTLRESP_USE_CONSTMEM is defined IOCTL PKTs will be freed using
		 * DHD_NATIVE_TO_PKTID_FINI_IOCTL which calls free_ioctl_return_buffer.
		 */
		DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_ctrl_map);
		DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_rx_map);
		DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_tx_map);
#ifdef IOCTLRESP_USE_CONSTMEM
		DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl);
#endif /* IOCTLRESP_USE_CONSTMEM */
#ifdef DHD_MAP_PKTID_LOGGING
		DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_map);
		DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_unmap);
#endif /* DHD_MAP_PKTID_LOGGING */

#if defined(DHD_LB_TXC)
		if (prot->tx_compl_prod.buffer)
			MFREE(dhd->osh, prot->tx_compl_prod.buffer,
				sizeof(void*) * DHD_LB_WORKQ_SZ);
#endif /* DHD_LB_TXC */
#if defined(DHD_LB_RXC)
		if (prot->rx_compl_prod.buffer)
			MFREE(dhd->osh, prot->rx_compl_prod.buffer,
				sizeof(void*) * DHD_LB_WORKQ_SZ);
#endif /* DHD_LB_RXC */

		DHD_OS_PREFREE(dhd, dhd->prot, sizeof(dhd_prot_t));

		dhd->prot = NULL;
	}
} /* dhd_prot_detach */
/**
 * dhd_prot_reset - Reset the protocol layer without freeing any objects.
 * This may be invoked to soft reboot the dongle, without having to
 * detach and attach the entire protocol layer.
 *
 * After dhd_prot_reset(), dhd_prot_init() may be invoked
 * without going through a dhd_prot_attach() phase.
 */
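/*
 * Illustrative soft-reboot sketch (hypothetical bus-layer caller, not part of
 * this file): the attach phase is skipped on the way back up.
 *
 *   dhd_prot_reset(dhd);    // clear rings, buffers, and pktid maps
 *   // ... dongle is rebooted and pcie_shared_t re-read by the bus layer ...
 *   dhd_prot_init(dhd);     // re-init without dhd_prot_attach()
 */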
void
dhd_prot_reset(dhd_pub_t *dhd)
{
	struct dhd_prot *prot = dhd->prot;

	DHD_TRACE(("%s\n", __FUNCTION__));

	if (prot == NULL) {
		return;
	}

	dhd_prot_flowrings_pool_reset(dhd);

	/* Reset Common MsgBuf Rings */
	dhd_prot_ring_reset(dhd, &prot->h2dring_ctrl_subn);
	dhd_prot_ring_reset(dhd, &prot->h2dring_rxp_subn);
	dhd_prot_ring_reset(dhd, &prot->d2hring_ctrl_cpln);
	dhd_prot_ring_reset(dhd, &prot->d2hring_tx_cpln);
	dhd_prot_ring_reset(dhd, &prot->d2hring_rx_cpln);

	/* Reset info rings */
	if (prot->h2dring_info_subn) {
		dhd_prot_ring_reset(dhd, prot->h2dring_info_subn);
	}

	if (prot->d2hring_info_cpln) {
		dhd_prot_ring_reset(dhd, prot->d2hring_info_cpln);
	}

	/* Reset all DMA-able buffers allocated during prot attach */
	dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf);
	dhd_dma_buf_reset(dhd, &prot->retbuf);
	dhd_dma_buf_reset(dhd, &prot->ioctbuf);
	dhd_dma_buf_reset(dhd, &prot->host_bus_throughput_buf);
	dhd_dma_buf_reset(dhd, &prot->hostts_req_buf);
	dhd_dma_buf_reset(dhd, &prot->fw_trap_buf);

	dhd_dma_buf_reset(dhd, &prot->h2d_ifrm_indx_wr_buf);

	/* Reset all DMA-able buffers for DMAing H2D/D2H WR/RD indices */
	dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_rd_buf);
	dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_wr_buf);
	dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_rd_buf);
	dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_wr_buf);

	prot->rx_metadata_offset = 0;
	prot->tx_metadata_offset = 0;

	prot->rxbufpost = 0;
	prot->cur_event_bufs_posted = 0;
	prot->cur_ioctlresp_bufs_posted = 0;

	OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count);
	prot->data_seq_no = 0;
	prot->ioctl_seq_no = 0;
	prot->ioctl_state = 0;
	prot->curr_ioctl_cmd = 0;
	prot->ioctl_received = IOCTL_WAIT;
	/* To catch any rollover issues fast, starting with higher ioctl_trans_id */
	prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER;

	/* dhd_flow_rings_init is located at dhd_bus_start,
	 * so when stopping bus, flowrings shall be deleted
	 */
	if (dhd->flow_rings_inited) {
		dhd_flow_rings_deinit(dhd);
	}

	/* Reset PKTID map */
	DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_ctrl_map);
	DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_rx_map);
	DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_tx_map);
#ifdef IOCTLRESP_USE_CONSTMEM
	DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, prot->pktid_map_handle_ioctl);
#endif /* IOCTLRESP_USE_CONSTMEM */
#ifdef DMAMAP_STATS
	dhd->dma_stats.txdata = dhd->dma_stats.txdata_sz = 0;
	dhd->dma_stats.rxdata = dhd->dma_stats.rxdata_sz = 0;
#ifndef IOCTLRESP_USE_CONSTMEM
	dhd->dma_stats.ioctl_rx = dhd->dma_stats.ioctl_rx_sz = 0;
#endif /* IOCTLRESP_USE_CONSTMEM */
	dhd->dma_stats.event_rx = dhd->dma_stats.event_rx_sz = 0;
	dhd->dma_stats.info_rx = dhd->dma_stats.info_rx_sz = 0;
	dhd->dma_stats.tsbuf_rx = dhd->dma_stats.tsbuf_rx_sz = 0;
#endif /* DMAMAP_STATS */
} /* dhd_prot_reset */
#if defined(DHD_LB_RXP)
#define DHD_LB_DISPATCH_RX_PROCESS(dhdp)	dhd_lb_dispatch_rx_process(dhdp)
#else /* !DHD_LB_RXP */
#define DHD_LB_DISPATCH_RX_PROCESS(dhdp)	do { /* noop */ } while (0)
#endif /* !DHD_LB_RXP */

#if defined(DHD_LB_RXC)
#define DHD_LB_DISPATCH_RX_COMPL(dhdp)	dhd_lb_dispatch_rx_compl(dhdp)
#else /* !DHD_LB_RXC */
#define DHD_LB_DISPATCH_RX_COMPL(dhdp)	do { /* noop */ } while (0)
#endif /* !DHD_LB_RXC */

#if defined(DHD_LB_TXC)
#define DHD_LB_DISPATCH_TX_COMPL(dhdp)	dhd_lb_dispatch_tx_compl(dhdp)
#else /* !DHD_LB_TXC */
#define DHD_LB_DISPATCH_TX_COMPL(dhdp)	do { /* noop */ } while (0)
#endif /* !DHD_LB_TXC */
/* DHD load balancing: deferral of work to another online CPU */
/* DHD_LB_TXC DHD_LB_RXC DHD_LB_RXP dispatchers, in dhd_linux.c */
extern void dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp);
extern void dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp);
extern void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
extern void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);

#if defined(DHD_LB_RXP)
/**
 * dhd_lb_dispatch_rx_process - load balance by dispatching the Rx processing
 * work to other CPU cores
 */
static INLINE void
dhd_lb_dispatch_rx_process(dhd_pub_t *dhdp)
{
	dhd_lb_rx_napi_dispatch(dhdp); /* dispatch rx_process_napi */
}
#endif /* DHD_LB_RXP */
#if defined(DHD_LB_TXC)
/**
 * dhd_lb_dispatch_tx_compl - load balance by dispatching the Tx completion
 * work to other CPU cores
 */
static INLINE void
dhd_lb_dispatch_tx_compl(dhd_pub_t *dhdp, uint16 ring_idx)
{
	bcm_workq_prod_sync(&dhdp->prot->tx_compl_prod); /* flush WR index */
	dhd_lb_tx_compl_dispatch(dhdp); /* dispatch tx_compl_tasklet */
}

/**
 * DHD load balanced tx completion tasklet handler, that will perform the
 * freeing of packets on the selected CPU. Packet pointers are delivered to
 * this tasklet via the tx complete workq.
 */
void
dhd_lb_tx_compl_handler(unsigned long data)
{
	int elem_ix;
	void *pkt, **elem;
	dmaaddr_t pa;
	uint32 pa_len;
	dhd_pub_t *dhd = (dhd_pub_t *)data;
	dhd_prot_t *prot = dhd->prot;
	bcm_workq_t *workq = &prot->tx_compl_cons;
	uint32 count = 0;

	int curr_cpu;
	curr_cpu = get_cpu();
	put_cpu();

	DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhd);

	while (1) {
		elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);

		if (elem_ix == BCM_RING_EMPTY) {
			break;
		}

		elem = WORKQ_ELEMENT(void *, workq, elem_ix);
		pkt = *elem;

		DHD_INFO(("%s: tx_compl_cons pkt<%p>\n", __FUNCTION__, pkt));

		OSL_PREFETCH(PKTTAG(pkt));

		pa = DHD_PKTTAG_PA((dhd_pkttag_fr_t *)PKTTAG(pkt));
		pa_len = DHD_PKTTAG_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt));

		DMA_UNMAP(dhd->osh, pa, pa_len, DMA_RX, 0, 0);
#if defined(BCMPCIE)
		dhd_txcomplete(dhd, pkt, true);
#endif

		PKTFREE(dhd->osh, pkt, TRUE);
		count++;
	}

	bcm_workq_cons_sync(workq);
	DHD_LB_STATS_UPDATE_TXC_HISTO(dhd, count);
}
#endif /* DHD_LB_TXC */
#if defined(DHD_LB_RXC)
/**
 * dhd_lb_dispatch_rx_compl - load balance by dispatching the Rx completion
 * work to other CPU cores
 */
static INLINE void
dhd_lb_dispatch_rx_compl(dhd_pub_t *dhdp)
{
	dhd_prot_t *prot = dhdp->prot;
	/* Schedule the tasklet only if we have to */
	if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
		/* flush WR index */
		bcm_workq_prod_sync(&dhdp->prot->rx_compl_prod);
		dhd_lb_rx_compl_dispatch(dhdp); /* dispatch rx_compl_tasklet */
	}
}

void
dhd_lb_rx_compl_handler(unsigned long data)
{
	dhd_pub_t *dhd = (dhd_pub_t *)data;
	bcm_workq_t *workq = &dhd->prot->rx_compl_cons;

	DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhd);

	dhd_msgbuf_rxbuf_post(dhd, TRUE); /* re-use pktids */
	bcm_workq_cons_sync(workq);
}
#endif /* DHD_LB_RXC */
void
dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset)
{
	dhd_prot_t *prot = dhd->prot;
	prot->rx_dataoffset = rx_offset;
}
static int
dhd_check_create_info_rings(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;
	int ret = BCME_ERROR;
	uint16 ringid;

	/* dongle may increase max_submission_rings so keep
	 * ringid at end of dynamic rings
	 */
	ringid = dhd->bus->max_tx_flowrings +
		(dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
		BCMPCIE_H2D_COMMON_MSGRINGS;

	if (prot->h2dring_info_subn && prot->d2hring_info_cpln) {
		return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
	}

	if (prot->h2dring_info_subn == NULL) {
		prot->h2dring_info_subn = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
		if (prot->h2dring_info_subn == NULL) {
			DHD_ERROR(("%s: couldn't alloc memory for h2dring_info_subn\n",
				__FUNCTION__));
			return BCME_NOMEM;
		}

		DHD_INFO(("%s: about to create debug submit ring\n", __FUNCTION__));
		ret = dhd_prot_ring_attach(dhd, prot->h2dring_info_subn, "h2dinfo",
			H2DRING_DYNAMIC_INFO_MAX_ITEM, H2DRING_INFO_BUFPOST_ITEMSIZE,
			ringid);
		if (ret != BCME_OK) {
			DHD_ERROR(("%s: couldn't alloc resources for dbg submit ring\n",
				__FUNCTION__));
			goto err;
		}
	}

	if (prot->d2hring_info_cpln == NULL) {
		prot->d2hring_info_cpln = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
		if (prot->d2hring_info_cpln == NULL) {
			DHD_ERROR(("%s: couldn't alloc memory for d2hring_info_cpln\n",
				__FUNCTION__));
			return BCME_NOMEM;
		}

		/* create the debug info completion ring next to debug info submit ring
		 * ringid = id next to debug info submit ring
		 */
		ringid = ringid + 1;

		DHD_INFO(("%s: about to create debug cpl ring\n", __FUNCTION__));
		ret = dhd_prot_ring_attach(dhd, prot->d2hring_info_cpln, "d2hinfo",
			D2HRING_DYNAMIC_INFO_MAX_ITEM, D2HRING_INFO_BUFCMPLT_ITEMSIZE,
			ringid);
		if (ret != BCME_OK) {
			DHD_ERROR(("%s: couldn't alloc resources for dbg cpl ring\n",
				__FUNCTION__));
			dhd_prot_ring_detach(dhd, prot->h2dring_info_subn);
			goto err;
		}
	}

	return ret;

err:
	MFREE(prot->osh, prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
	prot->h2dring_info_subn = NULL;

	if (prot->d2hring_info_cpln) {
		MFREE(prot->osh, prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
		prot->d2hring_info_cpln = NULL;
	}
	return ret;
} /* dhd_check_create_info_rings */
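/*
 * Worked example of the ringid computation above (the numbers are
 * illustrative only, not taken from any particular chip): with
 * max_tx_flowrings = 40, max_submission_rings = 41 and
 * BCMPCIE_H2D_COMMON_MSGRINGS = 2, the info submit ring gets
 * ringid = 40 + (41 - 40) + 2 = 43, i.e. the first id past all common and
 * dynamic submission rings; the matching completion ring is ringid + 1.
 */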
int
dhd_prot_init_info_rings(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;
	int ret = BCME_OK;

	if ((ret = dhd_check_create_info_rings(dhd)) != BCME_OK) {
		DHD_ERROR(("%s: info rings aren't created! \n",
			__FUNCTION__));
		return ret;
	}

	if ((prot->d2hring_info_cpln->inited) || (prot->d2hring_info_cpln->create_pending)) {
		DHD_INFO(("Info completion ring was created!\n"));
		return ret;
	}

	DHD_TRACE(("trying to send create d2h info ring: id %d\n", prot->d2hring_info_cpln->idx));
	ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_info_cpln,
		BCMPCIE_D2H_RING_TYPE_DBGBUF_CPL, DHD_D2H_DBGRING_REQ_PKTID);
	if (ret != BCME_OK)
		return ret;

	prot->d2hring_info_cpln->seqnum = D2H_EPOCH_INIT_VAL;

	DHD_TRACE(("trying to send create h2d info ring id %d\n", prot->h2dring_info_subn->idx));
	prot->h2dring_info_subn->n_completion_ids = 1;
	prot->h2dring_info_subn->compeltion_ring_ids[0] = prot->d2hring_info_cpln->idx;

	ret = dhd_send_h2d_ringcreate(dhd, prot->h2dring_info_subn,
		BCMPCIE_H2D_RING_TYPE_DBGBUF_SUBMIT, DHD_H2D_DBGRING_REQ_PKTID);

	/* Note: there is no way to delete the d2h or h2d ring if either create fails,
	 * so we cannot clean up if one ring was created while the other failed
	 */
	return ret;
} /* dhd_prot_init_info_rings */
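/*
 * Note: the H2D info submit ring is bound to its D2H completion ring by
 * writing the completion ring's idx into compeltion_ring_ids[0] (sic, the
 * field name is spelled that way in the ring structure) before the h2d ring
 * create request is sent; the dongle then directs all completions for the
 * submit ring to that one D2H ring.
 */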
static void
dhd_prot_detach_info_rings(dhd_pub_t *dhd)
{
	if (dhd->prot->h2dring_info_subn) {
		dhd_prot_ring_detach(dhd, dhd->prot->h2dring_info_subn);
		MFREE(dhd->prot->osh, dhd->prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
		dhd->prot->h2dring_info_subn = NULL;
	}
	if (dhd->prot->d2hring_info_cpln) {
		dhd_prot_ring_detach(dhd, dhd->prot->d2hring_info_cpln);
		MFREE(dhd->prot->osh, dhd->prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
		dhd->prot->d2hring_info_cpln = NULL;
	}
}
/**
 * Initialize protocol: sync w/dongle state.
 * Sets dongle media info (iswl, drv_version, mac address).
 */
int dhd_sync_with_dongle(dhd_pub_t *dhd)
{
	int ret = 0;
	wlc_rev_info_t revinfo;
	char buf[128];
	dhd_prot_t *prot = dhd->prot;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);

	/* Post ts buffer after shim layer is attached */
	ret = dhd_msgbuf_rxbuf_post_ts_bufs(dhd);

#ifdef DHD_FW_COREDUMP
	/* Check the memdump capability */
	dhd_get_memdump_info(dhd);
#endif /* DHD_FW_COREDUMP */
#ifdef BCMASSERT_LOG
	dhd_get_assert_info(dhd);
#endif /* BCMASSERT_LOG */

	/* Get the device rev info */
	memset(&revinfo, 0, sizeof(revinfo));
	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
	if (ret < 0) {
		DHD_ERROR(("%s: GET revinfo FAILED\n", __FUNCTION__));
		goto done;
	}
	DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
		revinfo.deviceid, revinfo.vendorid, revinfo.chipnum));

	/* Get the RxBuf post size */
	memset(buf, 0, sizeof(buf));
	bcm_mkiovar("rxbufpost_sz", NULL, 0, buf, sizeof(buf));
	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
	if (ret < 0) {
		DHD_ERROR(("%s: GET RxBuf post FAILED, default to %d\n",
			__FUNCTION__, DHD_FLOWRING_RX_BUFPOST_PKTSZ));
		prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
	} else {
		memcpy(&(prot->rxbufpost_sz), buf, sizeof(uint16));
		if ((prot->rxbufpost_sz < DHD_FLOWRING_RX_BUFPOST_PKTSZ) ||
			(prot->rxbufpost_sz > DHD_FLOWRING_RX_BUFPOST_PKTSZ_MAX)) {
			DHD_ERROR(("%s: Invalid RxBuf post size : %d, default to %d\n",
				__FUNCTION__, prot->rxbufpost_sz, DHD_FLOWRING_RX_BUFPOST_PKTSZ));
			prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
		} else {
			DHD_ERROR(("%s: RxBuf Post : %d\n", __FUNCTION__, prot->rxbufpost_sz));
		}
	}

	/* Post buffers for packet reception */
	dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */

	DHD_SSSR_DUMP_INIT(dhd);

	dhd_process_cid_mac(dhd, TRUE);
	ret = dhd_preinit_ioctls(dhd);
	dhd_process_cid_mac(dhd, FALSE);

	/* Always assumes wl for now */
	dhd->iswl = TRUE;
done:
	return ret;
} /* dhd_sync_with_dongle */
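/*
 * Note: for the "rxbufpost_sz" iovar above, the dongle returns the value in
 * the first bytes of 'buf', which is why a uint16 is copied straight out of
 * the buffer; values outside [DHD_FLOWRING_RX_BUFPOST_PKTSZ,
 * DHD_FLOWRING_RX_BUFPOST_PKTSZ_MAX] fall back to the default rather than
 * being used as-is.
 */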
#define DHD_DBG_SHOW_METADATA 0

#if DHD_DBG_SHOW_METADATA
static void BCMFASTPATH
dhd_prot_print_metadata(dhd_pub_t *dhd, void *ptr, int len)
{
	uint8 tlv_t;
	uint8 tlv_l;
	uint8 *tlv_v = (uint8 *)ptr;

	if (len <= BCMPCIE_D2H_METADATA_HDRLEN)
		return;

	len -= BCMPCIE_D2H_METADATA_HDRLEN;
	tlv_v += BCMPCIE_D2H_METADATA_HDRLEN;

	while (len > TLV_HDR_LEN) {
		tlv_t = tlv_v[TLV_TAG_OFF];
		tlv_l = tlv_v[TLV_LEN_OFF];

		len -= TLV_HDR_LEN;
		tlv_v += TLV_HDR_LEN;
		if (len < tlv_l)
			break;
		if ((tlv_t == 0) || (tlv_t == WLFC_CTL_TYPE_FILLER))
			break;

		switch (tlv_t) {
		case WLFC_CTL_TYPE_TXSTATUS: {
			uint32 txs;
			memcpy(&txs, tlv_v, sizeof(uint32));
			if (tlv_l < (sizeof(wl_txstatus_additional_info_t) + sizeof(uint32))) {
				printf("METADATA TX_STATUS: %08x\n", txs);
			} else {
				wl_txstatus_additional_info_t tx_add_info;
				memcpy(&tx_add_info, tlv_v + sizeof(uint32),
					sizeof(wl_txstatus_additional_info_t));
				printf("METADATA TX_STATUS: %08x WLFCTS[%04x | %08x - %08x - %08x]"
					" rate = %08x tries = %d - %d\n", txs,
					tx_add_info.seq, tx_add_info.entry_ts,
					tx_add_info.enq_ts, tx_add_info.last_ts,
					tx_add_info.rspec, tx_add_info.rts_cnt,
					tx_add_info.tx_cnt);
			}
			break;
		}

		case WLFC_CTL_TYPE_RSSI: {
			if (tlv_l == 1)
				printf("METADATA RX_RSSI: rssi = %d\n", *tlv_v);
			else
				printf("METADATA RX_RSSI[%04x]: rssi = %d snr = %d\n",
					(*(tlv_v + 3) << 8) | *(tlv_v + 2),
					(int8)(*tlv_v), *(tlv_v + 1));
			break;
		}

		case WLFC_CTL_TYPE_FIFO_CREDITBACK:
			bcm_print_bytes("METADATA FIFO_CREDITBACK", tlv_v, tlv_l);
			break;

		case WLFC_CTL_TYPE_TX_ENTRY_STAMP:
			bcm_print_bytes("METADATA TX_ENTRY", tlv_v, tlv_l);
			break;

		case WLFC_CTL_TYPE_RX_STAMP: {
			struct {
				uint32 rspec;
				uint32 bus_time;
				uint32 wlan_time;
			} rx_tmstamp;
			memcpy(&rx_tmstamp, tlv_v, sizeof(rx_tmstamp));
			printf("METADATA RX TIMESTAMP: WLFCTS[%08x - %08x] rate = %08x\n",
				rx_tmstamp.wlan_time, rx_tmstamp.bus_time, rx_tmstamp.rspec);
			break;
		}

		case WLFC_CTL_TYPE_TRANS_ID:
			bcm_print_bytes("METADATA TRANS_ID", tlv_v, tlv_l);
			break;

		case WLFC_CTL_TYPE_COMP_TXSTATUS:
			bcm_print_bytes("METADATA COMP_TXSTATUS", tlv_v, tlv_l);
			break;

		default:
			bcm_print_bytes("METADATA UNKNOWN", tlv_v, tlv_l);
			break;
		}

		len -= tlv_l;
		tlv_v += tlv_l;
	}
}
#endif /* DHD_DBG_SHOW_METADATA */
static INLINE void BCMFASTPATH
dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send)
{
	if (pkt) {
		if (pkttype == PKTTYPE_IOCTL_RX ||
			pkttype == PKTTYPE_EVENT_RX ||
			pkttype == PKTTYPE_INFO_RX ||
			pkttype == PKTTYPE_TSBUF_RX) {
#ifdef DHD_USE_STATIC_CTRLBUF
			PKTFREE_STATIC(dhd->osh, pkt, send);
#else
			PKTFREE(dhd->osh, pkt, send);
#endif /* DHD_USE_STATIC_CTRLBUF */
		} else {
			PKTFREE(dhd->osh, pkt, send);
		}
	}
}
/**
 * dhd_prot_packet_get should be called only for items having a pktid_ctrl_map
 * handle; all the bottom-most functions, like dhd_pktid_map_free, hold a
 * separate DHD_PKTID_LOCK to ensure thread safety, so there is no need to
 * hold any locks in this function.
 */
static INLINE void * BCMFASTPATH
dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype, bool free_pktid)
{
	void *PKTBUF;
	dmaaddr_t pa;
	uint32 len;
	void *dmah;
	void *secdma;

#ifdef DHD_PCIE_PKTID
	if (free_pktid) {
		PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map,
			pktid, pa, len, dmah, secdma, pkttype);
	} else {
		PKTBUF = DHD_PKTID_TO_NATIVE_RSV(dhd, dhd->prot->pktid_ctrl_map,
			pktid, pa, len, dmah, secdma, pkttype);
	}
#else
	PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map, pktid, pa,
		len, dmah, secdma, pkttype);
#endif /* DHD_PCIE_PKTID */

	if (PKTBUF) {
		if (SECURE_DMA_ENAB(dhd->osh))
			SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah,
				secdma, 0);
		else
			DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
#ifdef DMAMAP_STATS
		switch (pkttype) {
#ifndef IOCTLRESP_USE_CONSTMEM
			case PKTTYPE_IOCTL_RX:
				dhd->dma_stats.ioctl_rx--;
				dhd->dma_stats.ioctl_rx_sz -= len;
				break;
#endif /* IOCTLRESP_USE_CONSTMEM */
			case PKTTYPE_EVENT_RX:
				dhd->dma_stats.event_rx--;
				dhd->dma_stats.event_rx_sz -= len;
				break;
			case PKTTYPE_INFO_RX:
				dhd->dma_stats.info_rx--;
				dhd->dma_stats.info_rx_sz -= len;
				break;
			case PKTTYPE_TSBUF_RX:
				dhd->dma_stats.tsbuf_rx--;
				dhd->dma_stats.tsbuf_rx_sz -= len;
				break;
			default:
				break;
		}
#endif /* DMAMAP_STATS */
	}

	return PKTBUF;
}
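/*
 * Note (reading of the branch above): free_pktid selects between
 * DHD_PKTID_TO_NATIVE, which releases the pktid locker so the id can be
 * re-allocated, and DHD_PKTID_TO_NATIVE_RSV, which keeps the locker reserved
 * so the same pktid can be re-posted later; this roughly mirrors the RSV
 * allocation used elsewhere in this file.
 */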
#ifdef IOCTLRESP_USE_CONSTMEM
static INLINE void BCMFASTPATH
dhd_prot_ioctl_ret_buffer_get(dhd_pub_t *dhd, uint32 pktid, dhd_dma_buf_t *retbuf)
{
	memset(retbuf, 0, sizeof(dhd_dma_buf_t));
	retbuf->va = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle_ioctl, pktid,
		retbuf->pa, retbuf->len, retbuf->dmah, retbuf->secdma, PKTTYPE_IOCTL_RX);
	return;
}
#endif /* IOCTLRESP_USE_CONSTMEM */
static void BCMFASTPATH
dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid)
{
	dhd_prot_t *prot = dhd->prot;
	int16 fillbufs;
	int retcount = 0;
	int cnt = 10; /* retry bound before giving up on ring space */

	fillbufs = prot->max_rxbufpost - prot->rxbufpost;
	while (fillbufs >= RX_BUF_BURST) {
		cnt--;
		if (cnt == 0) {
			/* find a better way to reschedule rx buf post if space not available */
			DHD_ERROR(("h2d rx post ring not available to post host buffers \n"));
			DHD_ERROR(("Current posted host buf count %d \n", prot->rxbufpost));
			break;
		}

		/* Post in a burst of 32 buffers at a time */
		fillbufs = MIN(fillbufs, RX_BUF_BURST);

		/* Post buffers */
		retcount = dhd_prot_rxbuf_post(dhd, fillbufs, use_rsv_pktid);

		if (retcount >= 0) {
			prot->rxbufpost += (uint16)retcount;
#ifdef DHD_LB_RXC
			/* dhd_prot_rxbuf_post returns the number of buffers posted */
			DHD_LB_STATS_UPDATE_RXC_HISTO(dhd, retcount);
#endif /* DHD_LB_RXC */
			/* how many more to post */
			fillbufs = prot->max_rxbufpost - prot->rxbufpost;
		} else {
			/* Make sure we don't run loop any further */
			fillbufs = 0;
		}
	}
}
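/*
 * Example of the fill arithmetic above: with max_rxbufpost = 256 and
 * rxbufpost = 0, the loop issues eight bursts of RX_BUF_BURST (32) buffers,
 * recomputing fillbufs after each burst; a partial post (retcount smaller
 * than the requested burst) simply leads to another attempt on the next
 * pass, while a negative return terminates the loop.
 */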
/** Post 'count' no of rx buffers to dongle */
static int BCMFASTPATH
dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid)
{
	void *p, **pktbuf;
	void *msg_start;
	uint8 *rxbuf_post_tmp;
	host_rxbuf_post_t *rxbuf_post;
	dmaaddr_t pa, *pktbuf_pa;
	uint32 *pktlen;
	uint32 pktid;
	uint16 i = 0, alloced = 0;
	unsigned long flags;
	dhd_prot_t *prot = dhd->prot;
	msgbuf_ring_t *ring = &prot->h2dring_rxp_subn;
	void *lcl_buf;
	uint16 lcl_buf_size;
	uint16 pktsz = prot->rxbufpost_sz;

	/* allocate a local buffer to store pkt buffer va, pa and length */
	lcl_buf_size = (sizeof(void *) + sizeof(dmaaddr_t) + sizeof(uint32)) *
		RX_BUF_BURST;
	lcl_buf = MALLOC(dhd->osh, lcl_buf_size);
	if (!lcl_buf) {
		DHD_ERROR(("%s: local scratch buffer allocation failed\n", __FUNCTION__));
		return 0;
	}
	pktbuf = lcl_buf;
	pktbuf_pa = (dmaaddr_t *)((uint8 *)pktbuf + sizeof(void *) * RX_BUF_BURST);
	pktlen = (uint32 *)((uint8 *)pktbuf_pa + sizeof(dmaaddr_t) * RX_BUF_BURST);

	for (i = 0; i < count; i++) {
		if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) {
			DHD_ERROR(("%s:%d: PKTGET for rxbuf failed\n", __FUNCTION__, __LINE__));
			dhd->rx_pktgetfail++;
			break;
		}

		pktlen[i] = PKTLEN(dhd->osh, p);
		if (SECURE_DMA_ENAB(dhd->osh)) {
			pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i],
				DMA_RX, p, 0, ring->dma_buf.secdma, 0);
		}
#ifndef BCM_SECURE_DMA
		else
			pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i], DMA_RX, p, 0);
#endif /* #ifndef BCM_SECURE_DMA */

		if (PHYSADDRISZERO(pa)) {
			PKTFREE(dhd->osh, p, FALSE);
			DHD_ERROR(("Invalid phyaddr 0\n"));
			break;
		}
#ifdef DMAMAP_STATS
		dhd->dma_stats.rxdata++;
		dhd->dma_stats.rxdata_sz += pktlen[i];
#endif /* DMAMAP_STATS */

		PKTPULL(dhd->osh, p, prot->rx_metadata_offset);
		pktlen[i] = PKTLEN(dhd->osh, p);
		pktbuf[i] = p;
		pktbuf_pa[i] = pa;
	}

	/* only post what we have */
	count = i;

	/* grab the ring lock to allocate pktid and post on ring */
	DHD_RING_LOCK(ring->ring_lock, flags);

	/* Claim space for exactly 'count' no of messages, for mitigation purpose */
	msg_start = (void *)
		dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, TRUE);
	if (msg_start == NULL) {
		DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
		DHD_RING_UNLOCK(ring->ring_lock, flags);
		goto cleanup;
	}
	/* if msg_start != NULL, we should have alloced space for atleast 1 item */
	ASSERT(alloced > 0);

	rxbuf_post_tmp = (uint8 *)msg_start;

	for (i = 0; i < alloced; i++) {
		rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp;
		p = pktbuf[i];
		pa = pktbuf_pa[i];

#if defined(DHD_LB_RXC)
		if (use_rsv_pktid == TRUE) {
			bcm_workq_t *workq = &prot->rx_compl_cons;
			int elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);

			if (elem_ix == BCM_RING_EMPTY) {
				DHD_INFO(("%s rx_compl_cons ring is empty\n", __FUNCTION__));
				pktid = DHD_PKTID_INVALID;
				goto alloc_pkt_id;
			} else {
				uint32 *elem = WORKQ_ELEMENT(uint32, workq, elem_ix);
				pktid = *elem;
			}

			rxbuf_post->cmn_hdr.request_id = htol32(pktid);

			/* Now populate the previous locker with valid information */
			if (pktid != DHD_PKTID_INVALID) {
				DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_rx_map,
					p, pktid, pa, pktlen[i], DMA_RX, NULL, NULL,
					PKTTYPE_DATA_RX);
			}
		} else
#endif /* ! DHD_LB_RXC */
		{
#if defined(DHD_LB_RXC)
alloc_pkt_id:
#endif /* DHD_LB_RXC */
			pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_rx_map, p, pa,
				pktlen[i], DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_RX);
#if defined(DHD_PCIE_PKTID)
			if (pktid == DHD_PKTID_INVALID) {
				break;
			}
#endif /* DHD_PCIE_PKTID */
		}

		/* Common msg header */
		rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST;
		rxbuf_post->cmn_hdr.if_id = 0;
		rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
		rxbuf_post->cmn_hdr.flags = ring->current_phase;
		ring->seqnum++;

		rxbuf_post->data_buf_len = htol16((uint16)pktlen[i]);
		rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
		rxbuf_post->data_buf_addr.low_addr =
			htol32(PHYSADDRLO(pa) + prot->rx_metadata_offset);

		if (prot->rx_metadata_offset) {
			rxbuf_post->metadata_buf_len = prot->rx_metadata_offset;
			rxbuf_post->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
			rxbuf_post->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
		} else {
			rxbuf_post->metadata_buf_len = 0;
			rxbuf_post->metadata_buf_addr.high_addr = 0;
			rxbuf_post->metadata_buf_addr.low_addr = 0;
		}

#ifdef DHD_PKTID_AUDIT_RING
		DHD_PKTID_AUDIT(dhd, prot->pktid_rx_map, pktid, DHD_DUPLICATE_ALLOC);
#endif /* DHD_PKTID_AUDIT_RING */

		rxbuf_post->cmn_hdr.request_id = htol32(pktid);

		/* Move rxbuf_post_tmp to next item */
		rxbuf_post_tmp = rxbuf_post_tmp + ring->item_len;
#ifdef DHD_LBUF_AUDIT
		PKTAUDIT(dhd->osh, p);
#endif
	}

	if (i < alloced) {
		if (ring->wr < (alloced - i))
			ring->wr = ring->max_items - (alloced - i);
		else
			ring->wr -= (alloced - i);

		if (ring->wr == 0) {
			DHD_INFO(("%s: flipping the phase now\n", ring->name));
			ring->current_phase = ring->current_phase ?
				0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
		}

		alloced = i;
	}

	/* update ring's WR index and ring doorbell to dongle */
	if (alloced > 0)
		dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);

	DHD_RING_UNLOCK(ring->ring_lock, flags);

cleanup:
	/* free any buffers (and their DMA mappings) that could not be posted */
	for (i = alloced; i < count; i++) {
		p = pktbuf[i];
		pa = pktbuf_pa[i];

		if (SECURE_DMA_ENAB(dhd->osh))
			SECURE_DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0,
				DHD_DMAH_NULL, ring->dma_buf.secdma, 0);
		else
			DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0, DHD_DMAH_NULL);
		PKTFREE(dhd->osh, p, FALSE);
	}

	MFREE(dhd->osh, lcl_buf, lcl_buf_size);

	return alloced;
} /* dhd_prot_rxbufpost */
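/*
 * Note on the local scratch buffer used above: it is carved into three
 * arrays of RX_BUF_BURST entries each - packet virtual addresses (void *),
 * DMA addresses (dmaaddr_t) and lengths (uint32). For example, assuming a
 * 64-bit host with an 8-byte dmaaddr_t, that is 32 * (8 + 8 + 4) = 640 bytes
 * allocated per post cycle and freed before return.
 */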
static int
dhd_prot_infobufpost(dhd_pub_t *dhd, msgbuf_ring_t *ring)
{
	unsigned long flags;
	uint32 pktid;
	dhd_prot_t *prot = dhd->prot;
	uint16 alloced = 0;
	uint16 pktsz = DHD_INFOBUF_RX_BUFPOST_PKTSZ;
	uint32 pktlen;
	info_buf_post_msg_t *infobuf_post;
	uint8 *infobuf_post_tmp;
	uint16 count;
	uint16 i = 0;
	dmaaddr_t pa;
	void *p;
	void *msg_start;

	if (ring == NULL)
		return 0;

	if (ring->inited != TRUE)
		return 0;
	if (ring == dhd->prot->h2dring_info_subn) {
		if (prot->max_infobufpost == 0)
			return 0;

		count = prot->max_infobufpost - prot->infobufpost;
	} else {
		DHD_ERROR(("Unknown ring\n"));
		return 0;
	}

	if (count == 0) {
		DHD_INFO(("%s: Cannot post more than max info resp buffers\n",
			__FUNCTION__));
		return 0;
	}

	/* grab the ring lock to allocate pktid and post on ring */
	DHD_RING_LOCK(ring->ring_lock, flags);

	/* Claim space for exactly 'count' no of messages, for mitigation purpose */
	msg_start = (void *) dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, FALSE);

	if (msg_start == NULL) {
		DHD_INFO(("%s:%d: infobufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
		DHD_RING_UNLOCK(ring->ring_lock, flags);
		return -1;
	}

	/* if msg_start != NULL, we should have alloced space for atleast 1 item */
	ASSERT(alloced > 0);

	infobuf_post_tmp = (uint8 *) msg_start;

	/* loop through each allocated message in the host ring */
	for (i = 0; i < alloced; i++) {
		infobuf_post = (info_buf_post_msg_t *) infobuf_post_tmp;
		/* Create a rx buffer */
#ifdef DHD_USE_STATIC_CTRLBUF
		p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
#else
		p = PKTGET(dhd->osh, pktsz, FALSE);
#endif /* DHD_USE_STATIC_CTRLBUF */
		if (p == NULL) {
			DHD_ERROR(("%s:%d: PKTGET for infobuf failed\n", __FUNCTION__, __LINE__));
			dhd->rx_pktgetfail++;
			break;
		}

		pktlen = PKTLEN(dhd->osh, p);
		if (SECURE_DMA_ENAB(dhd->osh)) {
			pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
				DMA_RX, p, 0, ring->dma_buf.secdma, 0);
		}
#ifndef BCM_SECURE_DMA
		else
			pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
#endif /* #ifndef BCM_SECURE_DMA */
		if (PHYSADDRISZERO(pa)) {
			if (SECURE_DMA_ENAB(dhd->osh)) {
				SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
					ring->dma_buf.secdma, 0);
			} else
				DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
#ifdef DHD_USE_STATIC_CTRLBUF
			PKTFREE_STATIC(dhd->osh, p, FALSE);
#else
			PKTFREE(dhd->osh, p, FALSE);
#endif /* DHD_USE_STATIC_CTRLBUF */
			DHD_ERROR(("Invalid phyaddr 0\n"));
			break;
		}
#ifdef DMAMAP_STATS
		dhd->dma_stats.info_rx++;
		dhd->dma_stats.info_rx_sz += pktlen;
#endif /* DMAMAP_STATS */
		pktlen = PKTLEN(dhd->osh, p);

		/* Common msg header */
		infobuf_post->cmn_hdr.msg_type = MSG_TYPE_INFO_BUF_POST;
		infobuf_post->cmn_hdr.if_id = 0;
		infobuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
		infobuf_post->cmn_hdr.flags = ring->current_phase;
		ring->seqnum++;

		pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_ctrl_map, p, pa,
			pktlen, DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_INFO_RX);

#if defined(DHD_PCIE_PKTID)
		if (pktid == DHD_PKTID_INVALID) {
			if (SECURE_DMA_ENAB(dhd->osh)) {
				SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0,
					ring->dma_buf.secdma, 0);
			} else
				DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0);
#ifdef DHD_USE_STATIC_CTRLBUF
			PKTFREE_STATIC(dhd->osh, p, FALSE);
#else
			PKTFREE(dhd->osh, p, FALSE);
#endif /* DHD_USE_STATIC_CTRLBUF */
			DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
			break;
		}
#endif /* DHD_PCIE_PKTID */

		infobuf_post->host_buf_len = htol16((uint16)pktlen);
		infobuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
		infobuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa));

#ifdef DHD_PKTID_AUDIT_RING
		DHD_PKTID_AUDIT(dhd, prot->pktid_ctrl_map, pktid, DHD_DUPLICATE_ALLOC);
#endif /* DHD_PKTID_AUDIT_RING */

		DHD_INFO(("ID %d, low_addr 0x%08x, high_addr 0x%08x\n",
			infobuf_post->cmn_hdr.request_id, infobuf_post->host_buf_addr.low_addr,
			infobuf_post->host_buf_addr.high_addr));

		infobuf_post->cmn_hdr.request_id = htol32(pktid);
		/* Move rxbuf_post_tmp to next item */
		infobuf_post_tmp = infobuf_post_tmp + ring->item_len;
#ifdef DHD_LBUF_AUDIT
		PKTAUDIT(dhd->osh, p);
#endif
	}

	if (i < alloced) {
		if (ring->wr < (alloced - i))
			ring->wr = ring->max_items - (alloced - i);
		else
			ring->wr -= (alloced - i);

		alloced = i;
	}

	if (alloced && ring->wr == 0) {
		DHD_INFO(("%s: flipping the phase now\n", ring->name));
		ring->current_phase = ring->current_phase ?
			0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
	}

	/* Update the write pointer in TCM & ring bell */
	if (alloced > 0) {
		if (ring == dhd->prot->h2dring_info_subn) {
			prot->infobufpost += alloced;
		}
		dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
	}

	DHD_RING_UNLOCK(ring->ring_lock, flags);

	return alloced;
} /* dhd_prot_infobufpost */
#ifdef IOCTLRESP_USE_CONSTMEM
static int
alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
{
	int err;

	memset(retbuf, 0, sizeof(dhd_dma_buf_t));

	if ((err = dhd_dma_buf_alloc(dhd, retbuf, IOCT_RETBUF_SIZE)) != BCME_OK) {
		DHD_ERROR(("%s: dhd_dma_buf_alloc err %d\n", __FUNCTION__, err));
		return BCME_NOMEM;
	}

	return BCME_OK;
}

static void
free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
{
	/* retbuf (declared on stack) not fully populated ... */
	if (retbuf->va) {
		uint32 dma_pad;
		dma_pad = (IOCT_RETBUF_SIZE % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
		retbuf->len = IOCT_RETBUF_SIZE;
		retbuf->_alloced = retbuf->len + dma_pad;
	}

	dhd_dma_buf_free(dhd, retbuf);
	return;
}
#endif /* IOCTLRESP_USE_CONSTMEM */
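/*
 * Note on the padding arithmetic above: the allocation side rounds the DMA
 * buffer up, so the free path must reconstruct the same _alloced value from
 * a possibly half-populated stack structure. If IOCT_RETBUF_SIZE is not a
 * multiple of DHD_DMA_PAD, a full DHD_DMA_PAD is assumed to have been added;
 * otherwise no pad was applied and _alloced equals len.
 */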
static int
dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, uint8 msg_type)
{
	void *p;
	uint16 pktsz;
	ioctl_resp_evt_buf_post_msg_t *rxbuf_post;
	dmaaddr_t pa;
	uint32 pktlen;
	dhd_prot_t *prot = dhd->prot;
	uint16 alloced = 0;
	unsigned long flags;
	dhd_dma_buf_t retbuf;
	void *dmah = NULL;
	uint32 pktid;
	void *map_handle;
	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
	bool non_ioctl_resp_buf = 0;
	dhd_pkttype_t buf_type;

	if (dhd->busstate == DHD_BUS_DOWN) {
		DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
		return -1;
	}
	memset(&retbuf, 0, sizeof(dhd_dma_buf_t));

	if (msg_type == MSG_TYPE_IOCTLRESP_BUF_POST)
		buf_type = PKTTYPE_IOCTL_RX;
	else if (msg_type == MSG_TYPE_EVENT_BUF_POST)
		buf_type = PKTTYPE_EVENT_RX;
	else if (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST)
		buf_type = PKTTYPE_TSBUF_RX;
	else {
		DHD_ERROR(("invalid message type to be posted to Ctrl ring %d\n", msg_type));
		return -1;
	}

	if ((msg_type == MSG_TYPE_EVENT_BUF_POST) || (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST))
		non_ioctl_resp_buf = TRUE;
	else
		non_ioctl_resp_buf = FALSE;

	if (non_ioctl_resp_buf) {
		/* Allocate packet for not ioctl resp buffer post */
		pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
	} else {
		/* Allocate packet for ctrl/ioctl buffer post */
		pktsz = DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ;
	}

#ifdef IOCTLRESP_USE_CONSTMEM
	if (!non_ioctl_resp_buf) {
		if (alloc_ioctl_return_buffer(dhd, &retbuf) != BCME_OK) {
			DHD_ERROR(("Could not allocate IOCTL response buffer\n"));
			return -1;
		}
		ASSERT(retbuf.len == IOCT_RETBUF_SIZE);
		p = retbuf.va;
		pktlen = retbuf.len;
		pa = retbuf.pa;
		dmah = retbuf.dmah;
	} else
#endif /* IOCTLRESP_USE_CONSTMEM */
	{
#ifdef DHD_USE_STATIC_CTRLBUF
		p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
#else
		p = PKTGET(dhd->osh, pktsz, FALSE);
#endif /* DHD_USE_STATIC_CTRLBUF */
		if (p == NULL) {
			DHD_ERROR(("%s:%d: PKTGET for %s buf failed\n",
				__FUNCTION__, __LINE__, non_ioctl_resp_buf ?
				"EVENT" : "IOCTL RESP"));
			dhd->rx_pktgetfail++;
			return -1;
		}

		pktlen = PKTLEN(dhd->osh, p);

		if (SECURE_DMA_ENAB(dhd->osh)) {
			pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
				DMA_RX, p, 0, ring->dma_buf.secdma, 0);
		}
#ifndef BCM_SECURE_DMA
		else
			pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
#endif /* #ifndef BCM_SECURE_DMA */

		if (PHYSADDRISZERO(pa)) {
			DHD_ERROR(("Invalid physaddr 0\n"));
			goto free_pkt_return;
		}

#ifdef DMAMAP_STATS
		switch (buf_type) {
#ifndef IOCTLRESP_USE_CONSTMEM
			case PKTTYPE_IOCTL_RX:
				dhd->dma_stats.ioctl_rx++;
				dhd->dma_stats.ioctl_rx_sz += pktlen;
				break;
#endif /* !IOCTLRESP_USE_CONSTMEM */
			case PKTTYPE_EVENT_RX:
				dhd->dma_stats.event_rx++;
				dhd->dma_stats.event_rx_sz += pktlen;
				break;
			case PKTTYPE_TSBUF_RX:
				dhd->dma_stats.tsbuf_rx++;
				dhd->dma_stats.tsbuf_rx_sz += pktlen;
				break;
			default:
				break;
		}
#endif /* DMAMAP_STATS */
	}

	/* grab the ring lock to allocate pktid and post on ring */
	DHD_RING_LOCK(ring->ring_lock, flags);

	rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *)
		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);

	if (rxbuf_post == NULL) {
		DHD_RING_UNLOCK(ring->ring_lock, flags);
		DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer \n",
			__FUNCTION__, __LINE__));

#ifdef IOCTLRESP_USE_CONSTMEM
		if (non_ioctl_resp_buf)
#endif /* IOCTLRESP_USE_CONSTMEM */
		{
			if (SECURE_DMA_ENAB(dhd->osh)) {
				SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
					ring->dma_buf.secdma, 0);
			} else {
				DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
			}
		}
		goto free_pkt_return;
	}

	/* CMN msg header */
	rxbuf_post->cmn_hdr.msg_type = msg_type;

#ifdef IOCTLRESP_USE_CONSTMEM
	if (!non_ioctl_resp_buf) {
		map_handle = dhd->prot->pktid_map_handle_ioctl;
		pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, p, pa, pktlen, DMA_RX, dmah,
			ring->dma_buf.secdma, buf_type);
	} else
#endif /* IOCTLRESP_USE_CONSTMEM */
	{
		map_handle = dhd->prot->pktid_ctrl_map;
		pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle,
			p, pa, pktlen, DMA_RX, dmah, ring->dma_buf.secdma,
			buf_type);
	}

	if (pktid == DHD_PKTID_INVALID) {
		if (ring->wr == 0) {
			ring->wr = ring->max_items - 1;
		} else {
			ring->wr--;
			if (ring->wr == 0) {
				ring->current_phase = ring->current_phase ? 0 :
					BCMPCIE_CMNHDR_PHASE_BIT_INIT;
			}
		}
		DHD_RING_UNLOCK(ring->ring_lock, flags);
		DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
		DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
		goto free_pkt_return;
	}

#ifdef DHD_PKTID_AUDIT_RING
	DHD_PKTID_AUDIT(dhd, map_handle, pktid, DHD_DUPLICATE_ALLOC);
#endif /* DHD_PKTID_AUDIT_RING */

	rxbuf_post->cmn_hdr.request_id = htol32(pktid);
	rxbuf_post->cmn_hdr.if_id = 0;
	rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
	ring->seqnum++;
	rxbuf_post->cmn_hdr.flags = ring->current_phase;

#if defined(DHD_PCIE_PKTID)
	if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) {
		if (ring->wr == 0) {
			ring->wr = ring->max_items - 1;
		} else {
			if (ring->wr == 0) {
				ring->current_phase = ring->current_phase ? 0 :
					BCMPCIE_CMNHDR_PHASE_BIT_INIT;
			}
		}
		DHD_RING_UNLOCK(ring->ring_lock, flags);
#ifdef IOCTLRESP_USE_CONSTMEM
		if (non_ioctl_resp_buf)
#endif /* IOCTLRESP_USE_CONSTMEM */
		{
			if (SECURE_DMA_ENAB(dhd->osh)) {
				SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
					ring->dma_buf.secdma, 0);
			} else
				DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
		}
		goto free_pkt_return;
	}
#endif /* DHD_PCIE_PKTID */

#ifndef IOCTLRESP_USE_CONSTMEM
	rxbuf_post->host_buf_len = htol16((uint16)PKTLEN(dhd->osh, p));
#else
	rxbuf_post->host_buf_len = htol16((uint16)pktlen);
#endif /* IOCTLRESP_USE_CONSTMEM */
	rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
	rxbuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa));

#ifdef DHD_LBUF_AUDIT
	if (non_ioctl_resp_buf)
		PKTAUDIT(dhd->osh, p);
#endif

	/* update ring's WR index and ring doorbell to dongle */
	dhd_prot_ring_write_complete(dhd, ring, rxbuf_post, 1);

	DHD_RING_UNLOCK(ring->ring_lock, flags);

	return 1;

free_pkt_return:
	if (!non_ioctl_resp_buf) {
#ifdef IOCTLRESP_USE_CONSTMEM
		free_ioctl_return_buffer(dhd, &retbuf);
#else
		dhd_prot_packet_free(dhd, p, buf_type, FALSE);
#endif /* IOCTLRESP_USE_CONSTMEM */
	} else {
		dhd_prot_packet_free(dhd, p, buf_type, FALSE);
	}

	return -1;
} /* dhd_prot_rxbufpost_ctrl */
static uint16
dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msg_type, uint32 max_to_post)
{
	uint32 i = 0;
	int32 ret_val;

	DHD_INFO(("max to post %d, event %d \n", max_to_post, msg_type));

	if (dhd->busstate == DHD_BUS_DOWN) {
		DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
		return 0;
	}

	while (i < max_to_post) {
		ret_val = dhd_prot_rxbufpost_ctrl(dhd, msg_type);
		if (ret_val < 0)
			break;
		i++;
	}
	DHD_INFO(("posted %d buffers of type %d\n", i, msg_type));
	return (uint16)i;
}
static void
dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;
	int max_to_post;

	DHD_INFO(("ioctl resp buf post\n"));
	max_to_post = prot->max_ioctlrespbufpost - prot->cur_ioctlresp_bufs_posted;
	if (max_to_post <= 0) {
		DHD_INFO(("%s: Cannot post more than max IOCTL resp buffers\n",
			__FUNCTION__));
		return;
	}
	prot->cur_ioctlresp_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
		MSG_TYPE_IOCTLRESP_BUF_POST, max_to_post);
}
static void
dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;
	int max_to_post;

	max_to_post = prot->max_eventbufpost - prot->cur_event_bufs_posted;
	if (max_to_post <= 0) {
		DHD_ERROR(("%s: Cannot post more than max event buffers\n",
			__FUNCTION__));
		return;
	}
	prot->cur_event_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
		MSG_TYPE_EVENT_BUF_POST, max_to_post);
}
static int
dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *dhd)
{
	/* no timestamp buffers are posted in this configuration */
	return 0;
}
int BCMFASTPATH
dhd_prot_process_msgbuf_infocpl(dhd_pub_t *dhd, uint bound)
{
	dhd_prot_t *prot = dhd->prot;
	bool more = TRUE;
	uint n = 0;
	msgbuf_ring_t *ring = prot->d2hring_info_cpln;
	unsigned long flags;

	if (ring == NULL)
		return FALSE;
	if (ring->inited != TRUE)
		return FALSE;

	/* Process all the messages - DTOH direction */
	while (!dhd_is_device_removed(dhd)) {
		uint8 *msg_addr;
		uint32 msg_len;

		if (dhd->hang_was_sent) {
			more = FALSE;
			break;
		}
#ifdef DHD_MAP_LOGGING
		if (dhd->smmu_fault_occurred) {
			more = FALSE;
			break;
		}
#endif /* DHD_MAP_LOGGING */

		DHD_RING_LOCK(ring->ring_lock, flags);
		/* Get the message from ring */
		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
		DHD_RING_UNLOCK(ring->ring_lock, flags);
		if (msg_addr == NULL) {
			more = FALSE;
			break;
		}

		/* Prefetch data to populate the cache */
		OSL_PREFETCH(msg_addr);

		if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
			DHD_ERROR(("%s: Error at process rxpl msgbuf of len %d\n",
				__FUNCTION__, msg_len));
		}

		/* Update read pointer */
		dhd_prot_upd_read_idx(dhd, ring);

		/* After batch processing, check RX bound */
		n += msg_len / ring->item_len;
		if (n >= bound) {
			break;
		}
	}

	return more;
}
/** called when DHD needs to check for 'receive complete' messages from the dongle */
int BCMFASTPATH
dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound)
{
	bool more = FALSE;
	uint n = 0;
	dhd_prot_t *prot = dhd->prot;
	msgbuf_ring_t *ring = &prot->d2hring_rx_cpln;
	uint16 item_len = ring->item_len;
	host_rxbuf_cmpl_t *msg = NULL;
	uint8 *msg_addr;
	uint32 msg_len;
	uint16 pkt_cnt, pkt_cnt_newidx;
	unsigned long flags;
	dmaaddr_t pa;
	uint32 len;
	void *dmah;
	void *secdma;
	int ifidx = 0, if_newidx = 0;
	void *pkt, *pktqhead = NULL, *prevpkt = NULL, *pkt_newidx, *nextpkt;
	uint32 pktid;
	int i;
	uint8 sync;

	while (1) {
		if (dhd_is_device_removed(dhd))
			break;

		if (dhd->hang_was_sent)
			break;

#ifdef DHD_MAP_LOGGING
		if (dhd->smmu_fault_occurred) {
			break;
		}
#endif /* DHD_MAP_LOGGING */

		pkt_cnt = 0;
		pktqhead = pkt_newidx = NULL;
		pkt_cnt_newidx = 0;

		DHD_RING_LOCK(ring->ring_lock, flags);

		/* Get the address of the next message to be read from ring */
		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
		if (msg_addr == NULL) {
			DHD_RING_UNLOCK(ring->ring_lock, flags);
			break;
		}

		while (msg_len > 0) {
			msg = (host_rxbuf_cmpl_t *)msg_addr;

			/* Wait until DMA completes, then fetch msg_type */
			sync = prot->d2h_sync_cb(dhd, ring, &msg->cmn_hdr, item_len);
			/*
			 * Update the curr_rd to the current index in the ring, from where
			 * the work item is fetched. This way if the fetched work item
			 * fails in LIVELOCK, we can print the exact read index in the ring
			 * that shows up the corrupted work item.
			 */
			if ((ring->curr_rd + 1) >= ring->max_items) {
				ring->curr_rd = 0;
			} else {
				ring->curr_rd += 1;
			}

			if (!sync) {
				msg_len -= item_len;
				msg_addr += item_len;
				continue;
			}

			pktid = ltoh32(msg->cmn_hdr.request_id);

#ifdef DHD_PKTID_AUDIT_RING
			DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_rx_map, pktid,
				DHD_DUPLICATE_FREE, msg, D2HRING_RXCMPLT_ITEMSIZE);
#endif /* DHD_PKTID_AUDIT_RING */

			pkt = DHD_PKTID_TO_NATIVE(dhd, prot->pktid_rx_map, pktid, pa,
				len, dmah, secdma, PKTTYPE_DATA_RX);
			if (!pkt) {
				msg_len -= item_len;
				msg_addr += item_len;
				continue;
			}

			if (SECURE_DMA_ENAB(dhd->osh))
				SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0,
					dmah, secdma, 0);
			else
				DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);

#ifdef DMAMAP_STATS
			dhd->dma_stats.rxdata--;
			dhd->dma_stats.rxdata_sz -= len;
#endif /* DMAMAP_STATS */
			DHD_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, "
				"pktdata %p, metalen %d\n",
				ltoh32(msg->cmn_hdr.request_id),
				ltoh16(msg->data_offset),
				ltoh16(msg->data_len), msg->cmn_hdr.if_id,
				msg->cmn_hdr.flags, PKTDATA(dhd->osh, pkt),
				ltoh16(msg->metadata_len)));

			pkt_cnt++;
			msg_len -= item_len;
			msg_addr += item_len;

#if DHD_DBG_SHOW_METADATA
			if (prot->metadata_dbg && prot->rx_metadata_offset &&
				msg->metadata_len) {
				uchar *ptr;
				ptr = PKTDATA(dhd->osh, pkt) - (prot->rx_metadata_offset);
				/* header followed by data */
				bcm_print_bytes("rxmetadata", ptr, msg->metadata_len);
				dhd_prot_print_metadata(dhd, ptr, msg->metadata_len);
			}
#endif /* DHD_DBG_SHOW_METADATA */

			/* data_offset from buf start */
			if (ltoh16(msg->data_offset)) {
				/* data offset given from dongle after split rx */
				PKTPULL(dhd->osh, pkt, ltoh16(msg->data_offset));
			}
			else if (prot->rx_dataoffset) {
				/* DMA RX offset updated through shared area */
				PKTPULL(dhd->osh, pkt, prot->rx_dataoffset);
			}
			/* Actual length of the packet */
			PKTSETLEN(dhd->osh, pkt, ltoh16(msg->data_len));
#if defined(WL_MONITOR)
			if (dhd_monitor_enabled(dhd, ifidx) &&
				(msg->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11)) {
				dhd_rx_mon_pkt(dhd, msg, pkt, ifidx);
				continue;
			}
#endif /* WL_MONITOR */

			if (!pktqhead) {
				pktqhead = prevpkt = pkt;
				ifidx = msg->cmn_hdr.if_id;
			} else {
				if (ifidx != msg->cmn_hdr.if_id) {
					pkt_newidx = pkt;
					if_newidx = msg->cmn_hdr.if_id;
					pkt_cnt--;
					pkt_cnt_newidx = 1;
					break;
				} else {
					PKTSETNEXT(dhd->osh, prevpkt, pkt);
					prevpkt = pkt;
				}
			}

#ifdef DHD_LBUF_AUDIT
			PKTAUDIT(dhd->osh, pkt);
#endif
		}

		/* roll back read pointer for unprocessed message */
		if (msg_len > 0) {
			if (ring->rd < msg_len / item_len)
				ring->rd = ring->max_items - msg_len / item_len;
			else
				ring->rd -= msg_len / item_len;
		}

		/* Update read pointer */
		dhd_prot_upd_read_idx(dhd, ring);

		DHD_RING_UNLOCK(ring->ring_lock, flags);

		pkt = pktqhead;
		for (i = 0; pkt && i < pkt_cnt; i++, pkt = nextpkt) {
			nextpkt = PKTNEXT(dhd->osh, pkt);
			PKTSETNEXT(dhd->osh, pkt, NULL);
#ifdef DHD_LB_RXP
			dhd_lb_rx_pkt_enqueue(dhd, pkt, ifidx);
#elif defined(DHD_RX_CHAINING)
			dhd_rxchain_frame(dhd, pkt, ifidx);
#else
			dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
#endif /* DHD_LB_RXP */
		}

		if (pkt_newidx) {
#ifdef DHD_LB_RXP
			dhd_lb_rx_pkt_enqueue(dhd, pkt_newidx, if_newidx);
#elif defined(DHD_RX_CHAINING)
			dhd_rxchain_frame(dhd, pkt_newidx, if_newidx);
#else
			dhd_bus_rx_frame(dhd->bus, pkt_newidx, if_newidx, 1);
#endif /* DHD_LB_RXP */
		}

		pkt_cnt += pkt_cnt_newidx;

		/* Post another set of rxbufs to the device */
		dhd_prot_return_rxbuf(dhd, 0, pkt_cnt);

#ifdef DHD_RX_CHAINING
		dhd_rxchain_commit(dhd);
#endif

		/* After batch processing, check RX bound */
		n += pkt_cnt;
		if (n >= bound) {
			more = TRUE;
			break;
		}
	}

	/* Call lb_dispatch only if packets are queued */
	if (n) {
		DHD_LB_DISPATCH_RX_COMPL(dhd);
		DHD_LB_DISPATCH_RX_PROCESS(dhd);
	}

	return more;
}
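/*
 * Design note for the rx completion loop above: work items are consumed and
 * packets chained (PKTSETNEXT) while the ring lock is held, but the chain is
 * handed to the network stack / LB queue / rx chain only after the lock is
 * dropped. A work item whose if_id differs from the chain head's terminates
 * the inner loop, and that single packet is dispatched separately as
 * pkt_newidx before the outer loop iterates again.
 */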
/**
 * Hands transmit packets (with a caller provided flow_id) over to dongle territory (the flow ring)
 */
void
dhd_prot_update_txflowring(dhd_pub_t *dhd, uint16 flowid, void *msgring)
{
	msgbuf_ring_t *ring = (msgbuf_ring_t *)msgring;

	if (ring == NULL) {
		DHD_ERROR(("%s: NULL txflowring. exiting...\n", __FUNCTION__));
		return;
	}

	/* Update read pointer */
	if (dhd->dma_d2h_ring_upd_support) {
		ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
	}

	DHD_TRACE(("ringid %d flowid %d write %d read %d \n\n",
		ring->idx, flowid, ring->wr, ring->rd));

	/* Need more logic here, but for now use it directly */
	dhd_bus_schedule_queue(dhd->bus, flowid, TRUE); /* from queue to flowring */
}
/** called when DHD needs to check for 'transmit complete' messages from the dongle */
int BCMFASTPATH
dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound)
{
	bool more = TRUE;
	uint n = 0;
	msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln;
	unsigned long flags;

	/* Process all the messages - DTOH direction */
	while (!dhd_is_device_removed(dhd)) {
		uint8 *msg_addr;
		uint32 msg_len;

		if (dhd->hang_was_sent) {
			more = FALSE;
			break;
		}
#ifdef DHD_MAP_LOGGING
		if (dhd->smmu_fault_occurred) {
			more = FALSE;
			break;
		}
#endif /* DHD_MAP_LOGGING */

		DHD_RING_LOCK(ring->ring_lock, flags);
		/* Get the address of the next message to be read from ring */
		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
		DHD_RING_UNLOCK(ring->ring_lock, flags);

		if (msg_addr == NULL) {
			more = FALSE;
			break;
		}

		/* Prefetch data to populate the cache */
		OSL_PREFETCH(msg_addr);

		if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
			DHD_ERROR(("%s: process %s msg addr %p len %d\n",
				__FUNCTION__, ring->name, msg_addr, msg_len));
		}

		/* Write to dngl rd ptr */
		dhd_prot_upd_read_idx(dhd, ring);

		/* After batch processing, check bound */
		n += msg_len / ring->item_len;
		if (n >= bound) {
			break;
		}
	}

	DHD_LB_DISPATCH_TX_COMPL(dhd);

	return more;
}
int BCMFASTPATH
dhd_prot_process_trapbuf(dhd_pub_t *dhd)
{
	uint32 data;
	dhd_dma_buf_t *trap_addr = &dhd->prot->fw_trap_buf;

	/* Interrupts can come in before this struct
	 * has been initialized.
	 */
	if (trap_addr->va == NULL) {
		DHD_ERROR(("%s: trap_addr->va is NULL\n", __FUNCTION__));
		return 0;
	}

	OSL_CACHE_INV((void *)trap_addr->va, sizeof(uint32));
	data = *(uint32 *)(trap_addr->va);

	if (data & D2H_DEV_FWHALT) {
		DHD_ERROR(("Firmware trapped and trap_data is 0x%04x\n", data));

		if (data & D2H_DEV_EXT_TRAP_DATA)
		{
			if (dhd->extended_trap_data) {
				OSL_CACHE_INV((void *)trap_addr->va,
					BCMPCIE_EXT_TRAP_DATA_MAXLEN);
				memcpy(dhd->extended_trap_data, (uint32 *)trap_addr->va,
					BCMPCIE_EXT_TRAP_DATA_MAXLEN);
			}
			DHD_ERROR(("Extended trap data available\n"));
		}
		return data;
	}
	return 0;
}
/** called when DHD needs to check for 'ioctl complete' messages from the dongle */
int BCMFASTPATH
dhd_prot_process_ctrlbuf(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;
	msgbuf_ring_t *ring = &prot->d2hring_ctrl_cpln;
	unsigned long flags;

	/* Process all the messages - DTOH direction */
	while (!dhd_is_device_removed(dhd)) {
		uint8 *msg_addr;
		uint32 msg_len;

		if (dhd->hang_was_sent) {
			break;
		}
#ifdef DHD_MAP_LOGGING
		if (dhd->smmu_fault_occurred) {
			break;
		}
#endif /* DHD_MAP_LOGGING */

		DHD_RING_LOCK(ring->ring_lock, flags);
		/* Get the address of the next message to be read from ring */
		msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
		DHD_RING_UNLOCK(ring->ring_lock, flags);

		if (msg_addr == NULL) {
			break;
		}

		/* Prefetch data to populate the cache */
		OSL_PREFETCH(msg_addr);
		if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
			DHD_ERROR(("%s: process %s msg addr %p len %d\n",
				__FUNCTION__, ring->name, msg_addr, msg_len));
		}

		/* Write to dngl rd ptr */
		dhd_prot_upd_read_idx(dhd, ring);
	}

	return 0;
}
/**
 * Consume messages out of the D2H ring. Ensure that the message's DMA to host
 * memory has completed, before invoking the message handler via a table lookup
 * of the cmn_msg_hdr::msg_type.
 */
static int BCMFASTPATH
dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len)
{
	uint32 buf_len = len;
	uint16 item_len;
	uint8 msg_type;
	cmn_msg_hdr_t *msg = NULL;
	int ret = BCME_OK;

	ASSERT(ring);
	item_len = ring->item_len;
	if (item_len == 0) {
		DHD_ERROR(("%s: ringidx %d, item_len %d buf_len %d \n",
			__FUNCTION__, ring->idx, item_len, buf_len));
		return BCME_ERROR;
	}

	while (buf_len > 0) {
		if (dhd->hang_was_sent) {
			ret = BCME_ERROR;
			goto done;
		}
#ifdef DHD_MAP_LOGGING
		if (dhd->smmu_fault_occurred) {
			ret = BCME_ERROR;
			goto done;
		}
#endif /* DHD_MAP_LOGGING */

		msg = (cmn_msg_hdr_t *)buf;

		/* Wait until DMA completes, then fetch msg_type */
		msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, item_len);

		/*
		 * Update the curr_rd to the current index in the ring, from where
		 * the work item is fetched. This way if the fetched work item
		 * fails in LIVELOCK, we can print the exact read index in the ring
		 * that shows up the corrupted work item.
		 */
		if ((ring->curr_rd + 1) >= ring->max_items) {
			ring->curr_rd = 0;
		} else {
			ring->curr_rd += 1;
		}

		/* Prefetch data to populate the cache */
		OSL_PREFETCH(buf + item_len);

		DHD_INFO(("msg_type %d item_len %d buf_len %d\n",
			msg_type, item_len, buf_len));

		if (msg_type == MSG_TYPE_LOOPBACK) {
			bcm_print_bytes("LPBK RESP: ", (uint8 *)msg, item_len);
			DHD_ERROR((" MSG_TYPE_LOOPBACK, len %d\n", item_len));
		}

		ASSERT(msg_type < DHD_PROT_FUNCS);
		if (msg_type >= DHD_PROT_FUNCS) {
			DHD_ERROR(("%s: msg_type %d, item_len %d buf_len %d\n",
				__FUNCTION__, msg_type, item_len, buf_len));
			ret = BCME_ERROR;
			goto done;
		}

		if (msg_type == MSG_TYPE_INFO_BUF_CMPLT) {
			if (ring == dhd->prot->d2hring_info_cpln) {
				if (!dhd->prot->infobufpost) {
					DHD_ERROR(("infobuf posted are zero, "
						"but there is a completion\n"));
					goto done;
				}
				dhd->prot->infobufpost--;
				dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn);
				dhd_prot_process_infobuf_complete(dhd, buf);
			}
		} else
		if (table_lookup[msg_type]) {
			table_lookup[msg_type](dhd, buf);
		}

		if (buf_len < item_len) {
			ret = BCME_ERROR;
			goto done;
		}
		buf_len = buf_len - item_len;
		buf = buf + item_len;
	}

done:

#ifdef DHD_RX_CHAINING
	dhd_rxchain_commit(dhd);
#endif

	return ret;
} /* dhd_prot_process_msgtype */
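/*
 * Note on d2h_sync_cb above: the dongle DMAs work items into host memory, so
 * the driver cannot trust an item until its sequence/epoch marker (or, in
 * other negotiated sync modes, a checksum over the item) matches what the
 * host expects; the callback retries until then, and only afterwards is
 * msg_type valid. The epoch written on the H2D side is likewise
 * ring->seqnum % H2D_EPOCH_MODULO, which is what the rxbuf/infobuf post
 * routines above store in cmn_hdr.epoch.
 */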
static void
dhd_prot_noop(dhd_pub_t *dhd, void *msg)
{
	return;
}
/** called on MSG_TYPE_RING_STATUS message received from dongle */
static void
dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg)
{
	pcie_ring_status_t *ring_status = (pcie_ring_status_t *) msg;
	uint32 request_id = ltoh32(ring_status->cmn_hdr.request_id);
	uint16 status = ltoh16(ring_status->compl_hdr.status);
	uint16 ring_id = ltoh16(ring_status->compl_hdr.flow_ring_id);

	DHD_ERROR(("ring status: request_id %d, status 0x%04x, flow ring %d, write_idx %d \n",
		request_id, status, ring_id, ltoh16(ring_status->write_idx)));

	if (ltoh16(ring_status->compl_hdr.ring_id) != BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT)
		return;
	if (status == BCMPCIE_BAD_PHASE) {
		/* bad phase report from the dongle */
		DHD_ERROR(("Bad phase\n"));
	}
	if (status != BCMPCIE_BADOPTION)
		return;

	if (request_id == DHD_H2D_DBGRING_REQ_PKTID) {
		if (dhd->prot->h2dring_info_subn != NULL) {
			if (dhd->prot->h2dring_info_subn->create_pending == TRUE) {
				DHD_ERROR(("H2D ring create failed for info ring\n"));
				dhd->prot->h2dring_info_subn->create_pending = FALSE;
			} else
				DHD_ERROR(("ring create ID for a ring, create not pending\n"));
		} else {
			DHD_ERROR(("%s info submit ring doesn't exist\n", __FUNCTION__));
		}
	}
	else if (request_id == DHD_D2H_DBGRING_REQ_PKTID) {
		if (dhd->prot->d2hring_info_cpln != NULL) {
			if (dhd->prot->d2hring_info_cpln->create_pending == TRUE) {
				DHD_ERROR(("D2H ring create failed for info ring\n"));
				dhd->prot->d2hring_info_cpln->create_pending = FALSE;
			} else
				DHD_ERROR(("ring create ID for info ring, create not pending\n"));
		} else {
			DHD_ERROR(("%s info cpl ring doesn't exist\n", __FUNCTION__));
		}
	}
	else {
		DHD_ERROR(("don't know how to pair with original request\n"));
	}
	/* How do we track this to pair it with ??? */
	return;
}
/** called on MSG_TYPE_GEN_STATUS ('general status') message received from dongle */
static void
dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg)
{
	pcie_gen_status_t *gen_status = (pcie_gen_status_t *)msg;
	DHD_ERROR(("ERROR: gen status: request_id %d, STATUS 0x%04x, flow ring %d \n",
		gen_status->cmn_hdr.request_id, gen_status->compl_hdr.status,
		gen_status->compl_hdr.flow_ring_id));

	/* How do we track this to pair it with ??? */
	return;
}
/**
 * Called on MSG_TYPE_IOCTLPTR_REQ_ACK ('ioctl ack') message received from dongle, meaning that the
 * dongle received the ioctl message in dongle memory.
 */
static void
dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg)
{
	ioctl_req_ack_msg_t *ioct_ack = (ioctl_req_ack_msg_t *)msg;
	unsigned long flags;
#if defined(DHD_PKTID_AUDIT_RING)
	uint32 pktid = ltoh32(ioct_ack->cmn_hdr.request_id);
#endif

#if defined(DHD_PKTID_AUDIT_RING)
	/* Skip audit for DHD_IOCTL_REQ_PKTID = 0xFFFE */
	if (pktid != DHD_IOCTL_REQ_PKTID) {
#ifndef IOCTLRESP_USE_CONSTMEM
		DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
			DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
#else
		DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_map_handle_ioctl, pktid,
			DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
#endif /* !IOCTLRESP_USE_CONSTMEM */
	}
#endif /* DHD_PKTID_AUDIT_RING */

	dhd->prot->ioctl_ack_time = OSL_LOCALTIME_NS();

	DHD_GENERAL_LOCK(dhd, flags);
	if ((dhd->prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) &&
		(dhd->prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
		dhd->prot->ioctl_state &= ~MSGBUF_IOCTL_ACK_PENDING;
	} else {
		DHD_ERROR(("%s: received ioctl ACK with state %02x trans_id = %d\n",
			__FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
		prhex("dhd_prot_ioctack_process:",
			(uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
	}
	DHD_GENERAL_UNLOCK(dhd, flags);

	DHD_CTL(("ioctl req ack: request_id %d, status 0x%04x, flow ring %d \n",
		ioct_ack->cmn_hdr.request_id, ioct_ack->compl_hdr.status,
		ioct_ack->compl_hdr.flow_ring_id));
	if (ioct_ack->compl_hdr.status != 0) {
		DHD_ERROR(("got an error status for the ioctl request...need to handle that\n"));
	}
}
/** called on MSG_TYPE_IOCTL_CMPLT message received from dongle */
static void
dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg)
{
	dhd_prot_t *prot = dhd->prot;
	uint32 pkt_id, xt_id;
	ioctl_comp_resp_msg_t *ioct_resp = (ioctl_comp_resp_msg_t *)msg;
	void *pkt;
	unsigned long flags;
	dhd_dma_buf_t retbuf;

	memset(&retbuf, 0, sizeof(dhd_dma_buf_t));

	pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id);

#if defined(DHD_PKTID_AUDIT_RING)
#ifndef IOCTLRESP_USE_CONSTMEM
	DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_ctrl_map, pkt_id,
		DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
#else
	DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_map_handle_ioctl, pkt_id,
		DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
#endif /* !IOCTLRESP_USE_CONSTMEM */
#endif /* DHD_PKTID_AUDIT_RING */

	DHD_GENERAL_LOCK(dhd, flags);
	if ((prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) ||
		!(prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
		DHD_ERROR(("%s: received ioctl response with state %02x trans_id = %d\n",
			__FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
		prhex("dhd_prot_ioctcmplt_process:",
			(uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
		DHD_GENERAL_UNLOCK(dhd, flags);
		return;
	}

	dhd->prot->ioctl_cmplt_time = OSL_LOCALTIME_NS();

	/* Clear Response pending bit */
	prot->ioctl_state &= ~MSGBUF_IOCTL_RESP_PENDING;
	DHD_GENERAL_UNLOCK(dhd, flags);

#ifndef IOCTLRESP_USE_CONSTMEM
	pkt = dhd_prot_packet_get(dhd, pkt_id, PKTTYPE_IOCTL_RX, TRUE);
#else
	dhd_prot_ioctl_ret_buffer_get(dhd, pkt_id, &retbuf);
	pkt = retbuf.va;
#endif /* !IOCTLRESP_USE_CONSTMEM */
	if (!pkt) {
		DHD_ERROR(("%s: received ioctl response with NULL pkt\n", __FUNCTION__));
		prhex("dhd_prot_ioctcmplt_process:",
			(uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
		return;
	}

	prot->ioctl_resplen = ltoh16(ioct_resp->resp_len);
	prot->ioctl_status = ltoh16(ioct_resp->compl_hdr.status);
	xt_id = ltoh16(ioct_resp->trans_id);

	if (xt_id != prot->ioctl_trans_id || prot->curr_ioctl_cmd != ioct_resp->cmd) {
		DHD_ERROR(("%s: transaction id(%d %d) or cmd(%d %d) mismatch\n",
			__FUNCTION__, xt_id, prot->ioctl_trans_id,
			prot->curr_ioctl_cmd, ioct_resp->cmd));
		dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_ERROR);
		dhd_prot_debug_info_print(dhd);
#ifdef DHD_FW_COREDUMP
		if (dhd->memdump_enabled) {
			/* collect core dump */
			dhd->memdump_type = DUMP_TYPE_TRANS_ID_MISMATCH;
			dhd_bus_mem_dump(dhd);
		}
#endif /* DHD_FW_COREDUMP */
		dhd_schedule_reset(dhd);
		goto exit;
	}

	DHD_CTL(("IOCTL_COMPLETE: req_id %x transid %d status %x resplen %d\n",
		pkt_id, xt_id, prot->ioctl_status, prot->ioctl_resplen));

	if (prot->ioctl_resplen > 0) {
#ifndef IOCTLRESP_USE_CONSTMEM
		bcopy(PKTDATA(dhd->osh, pkt), prot->retbuf.va, prot->ioctl_resplen);
#else
		bcopy(pkt, prot->retbuf.va, prot->ioctl_resplen);
#endif /* !IOCTLRESP_USE_CONSTMEM */
	}

	/* wake up any dhd_os_ioctl_resp_wait() */
	dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_SUCCESS);

exit:
#ifndef IOCTLRESP_USE_CONSTMEM
	dhd_prot_packet_free(dhd, pkt,
		PKTTYPE_IOCTL_RX, FALSE);
#else
	free_ioctl_return_buffer(dhd, &retbuf);
#endif /* !IOCTLRESP_USE_CONSTMEM */

	/* Post another ioctl buf to the device */
	if (prot->cur_ioctlresp_bufs_posted > 0) {
		prot->cur_ioctlresp_bufs_posted--;
	}

	dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
}
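/*
 * Note on the ioctl completion handshake above: an outstanding ioctl holds
 * two pending bits, MSGBUF_IOCTL_ACK_PENDING and MSGBUF_IOCTL_RESP_PENDING.
 * The ACK handler clears the former, this completion handler clears the
 * latter, and the response is accepted only if both the transaction id
 * (trans_id) and the ioctl command echoed by the dongle match what the host
 * sent; a mismatch triggers a memdump (when enabled) and a scheduled reset.
 */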
/** called on MSG_TYPE_TX_STATUS message received from dongle */
static void BCMFASTPATH
dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg)
{
	dhd_prot_t *prot = dhd->prot;
	host_txbuf_cmpl_t *txstatus;
	unsigned long flags;
	uint32 pktid;
	void *pkt;
	dmaaddr_t pa;
	uint32 len;
	void *dmah;
	void *secdma;
	bool pkt_fate;
	msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln;
#ifdef TX_STATUS_LATENCY_STATS
	flow_info_t *flow_info;
	uint64 tx_status_latency;
#endif /* TX_STATUS_LATENCY_STATS */
#if defined(TX_STATUS_LATENCY_STATS)
	flow_ring_node_t *flow_ring_node;
	uint16 flowid;
#endif

	txstatus = (host_txbuf_cmpl_t *)msg;
#if defined(TX_STATUS_LATENCY_STATS)
	flowid = txstatus->compl_hdr.flow_ring_id;
	flow_ring_node = DHD_FLOW_RING(dhd, flowid);
#endif

	/* locks required to protect circular buffer accesses */
	DHD_RING_LOCK(ring->ring_lock, flags);
	pktid = ltoh32(txstatus->cmn_hdr.request_id);
	pkt_fate = TRUE;

#if defined(DHD_PKTID_AUDIT_RING)
	DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_tx_map, pktid,
		DHD_DUPLICATE_FREE, msg, D2HRING_TXCMPLT_ITEMSIZE);
#endif /* DHD_PKTID_AUDIT_RING */

	DHD_INFO(("txstatus for pktid 0x%04x\n", pktid));
	if (OSL_ATOMIC_DEC_RETURN(dhd->osh, &prot->active_tx_count) < 0) {
		DHD_ERROR(("Extra packets are freed\n"));
	}

	pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
		pa, len, dmah, secdma, PKTTYPE_DATA_TX);
	if (!pkt) {
		DHD_RING_UNLOCK(ring->ring_lock, flags);
		DHD_ERROR(("%s: received txstatus with NULL pkt\n", __FUNCTION__));
		prhex("dhd_prot_txstatus_process:", (uchar *)msg, D2HRING_TXCMPLT_ITEMSIZE);
#ifdef DHD_FW_COREDUMP
		if (dhd->memdump_enabled) {
			/* collect core dump */
			dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
			dhd_bus_mem_dump(dhd);
		}
#endif /* DHD_FW_COREDUMP */
		return;
	}

	if (SECURE_DMA_ENAB(dhd->osh)) {
		int offset = 0;
		BCM_REFERENCE(offset);

		if (dhd->prot->tx_metadata_offset)
			offset = dhd->prot->tx_metadata_offset + ETHER_HDR_LEN;
		SECURE_DMA_UNMAP(dhd->osh, (uint) pa,
			(uint) dhd->prot->tx_metadata_offset, DMA_RX, 0, dmah,
			secdma, offset);
	} else {
		DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
	}

#ifdef TX_STATUS_LATENCY_STATS
	/* update the tx status latency for flowid */
	flow_info = &flow_ring_node->flow_info;
	tx_status_latency = OSL_SYSUPTIME_US() - DHD_PKT_GET_QTIME(pkt);
	flow_info->cum_tx_status_latency += tx_status_latency;
	flow_info->num_tx_status++;
#endif /* TX_STATUS_LATENCY_STATS */
#if defined(DHD_LB_TXC) && !defined(BCM_SECURE_DMA)
	{
		int elem_ix;
		void **elem;
		bcm_workq_t *workq;

		workq = &prot->tx_compl_prod;
		/*
		 * Produce the packet into the tx_compl workq for the tx compl tasklet
		 * to consume.
		 */
		OSL_PREFETCH(PKTTAG(pkt));

		/* fetch next available slot in workq */
		elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);

		DHD_PKTTAG_SET_PA((dhd_pkttag_fr_t *)PKTTAG(pkt), pa);
		DHD_PKTTAG_SET_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt), len);

		if (elem_ix == BCM_RING_FULL) {
			DHD_ERROR(("tx_compl_prod BCM_RING_FULL\n"));
			goto workq_ring_full;
		}

		elem = WORKQ_ELEMENT(void *, &prot->tx_compl_prod, elem_ix);
		*elem = pkt;

		smp_wmb();

		/* Sync WR index to consumer if the SYNC threshold has been reached */
		if (++prot->tx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) {
			bcm_workq_prod_sync(workq);
			prot->tx_compl_prod_sync = 0;
		}

		DHD_INFO(("%s: tx_compl_prod pkt<%p> sync<%d>\n",
			__FUNCTION__, pkt, prot->tx_compl_prod_sync));

		DHD_RING_UNLOCK(ring->ring_lock, flags);
		return;
	}

workq_ring_full:

#endif /* !DHD_LB_TXC */

#ifdef DMAMAP_STATS
	dhd->dma_stats.txdata--;
	dhd->dma_stats.txdata_sz -= len;
#endif /* DMAMAP_STATS */
	pkt_fate = dhd_dbg_process_tx_status(dhd, pkt, pktid,
		ltoh16(txstatus->compl_hdr.status) & WLFC_CTL_PKTFLAG_MASK);
#ifdef DHD_PKT_LOGGING
	if (dhd->d11_tx_status) {
		DHD_PKTLOG_TXS(dhd, pkt, pktid,
			ltoh16(txstatus->compl_hdr.status) & WLFC_CTL_PKTFLAG_MASK);
	}
#endif /* DHD_PKT_LOGGING */
#if defined(BCMPCIE)
	dhd_txcomplete(dhd, pkt, pkt_fate);
#endif /* BCMPCIE */

#if DHD_DBG_SHOW_METADATA
	if (dhd->prot->metadata_dbg &&
		dhd->prot->tx_metadata_offset && txstatus->metadata_len) {
		uchar *ptr;
		/* The Ethernet header of TX frame was copied and removed.
		 * Here, move the data pointer forward by Ethernet header size.
		 */
		PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN);
		ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->tx_metadata_offset);
		bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len);
		dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len);
	}
#endif /* DHD_DBG_SHOW_METADATA */

#ifdef DHD_LBUF_AUDIT
	PKTAUDIT(dhd->osh, pkt);
#endif

	DHD_RING_UNLOCK(ring->ring_lock, flags);
	PKTFREE(dhd->osh, pkt, TRUE);
	DHD_RING_LOCK(ring->ring_lock, flags);
	DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id,
		txstatus->tx_status);
	DHD_RING_UNLOCK(ring->ring_lock, flags);

	return;
} /* dhd_prot_txstatus_process */
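/*
 * Note: in the DHD_LB_TXC path above, the DMA address and length are stashed
 * in the packet tag (DHD_PKTTAG_SET_PA/_PA_LEN) before the pointer is
 * produced into the workq, so the actual DMA_UNMAP and PKTFREE happen later
 * in dhd_lb_tx_compl_handler() on the load-balanced CPU; only when the workq
 * is full does this function fall through to the inline free path.
 */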
/** called on MSG_TYPE_WL_EVENT message received from dongle */
static void
dhd_prot_event_process(dhd_pub_t *dhd, void *msg)
{
	wlevent_req_msg_t *evnt;
	uint32 bufid;
	uint16 buflen;
	int ifidx = 0;
	void *pkt;
	dhd_prot_t *prot = dhd->prot;

	/* Event complete header */
	evnt = (wlevent_req_msg_t *)msg;
	bufid = ltoh32(evnt->cmn_hdr.request_id);

#if defined(DHD_PKTID_AUDIT_RING)
	DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, bufid,
		DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
#endif /* DHD_PKTID_AUDIT_RING */

	buflen = ltoh16(evnt->event_data_len);

	ifidx = BCMMSGBUF_API_IFIDX(&evnt->cmn_hdr);

	/* Post another rxbuf to the device */
	if (prot->cur_event_bufs_posted)
		prot->cur_event_bufs_posted--;
	dhd_msgbuf_rxbuf_post_event_bufs(dhd);

	pkt = dhd_prot_packet_get(dhd, bufid, PKTTYPE_EVENT_RX, TRUE);
	if (!pkt) {
		DHD_ERROR(("%s: pkt is NULL for pktid %d\n", __FUNCTION__, bufid));
		return;
	}

	/* DMA RX offset updated through shared area */
	if (dhd->prot->rx_dataoffset)
		PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);

	PKTSETLEN(dhd->osh, pkt, buflen);
#ifdef DHD_LBUF_AUDIT
	PKTAUDIT(dhd->osh, pkt);
#endif
	dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
}
/** called on MSG_TYPE_INFO_BUF_CMPLT message received from dongle */
static void BCMFASTPATH
dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf)
{
	info_buf_resp_t *resp;
	uint32 pktid;
	uint16 buflen;
	void *pkt;

	resp = (info_buf_resp_t *)buf;
	pktid = ltoh32(resp->cmn_hdr.request_id);
	buflen = ltoh16(resp->info_data_len);

#ifdef DHD_PKTID_AUDIT_RING
	DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
		DHD_DUPLICATE_FREE, buf, D2HRING_INFO_BUFCMPLT_ITEMSIZE);
#endif /* DHD_PKTID_AUDIT_RING */

	DHD_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d, rx_dataoffset %d\n",
		pktid, buflen, resp->cmn_hdr.flags, ltoh16(resp->seqnum),
		dhd->prot->rx_dataoffset));

	pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_INFO_RX, TRUE);
	if (!pkt)
		return;

	/* DMA RX offset updated through shared area */
	if (dhd->prot->rx_dataoffset)
		PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);

	PKTSETLEN(dhd->osh, pkt, buflen);

#ifdef DHD_LBUF_AUDIT
	PKTAUDIT(dhd->osh, pkt);
#endif

	/* info ring "debug" data, which is not an 802.3 frame, is sent/hacked with a
	 * special ifidx of -1. This is just internal to dhd to get the data to
	 * dhd_linux.c:dhd_rx_frame() from here (dhd_prot_infobuf_cmplt_process).
	 */
	dhd_bus_rx_frame(dhd->bus, pkt, DHD_DUMMY_INFO_IF /* ifidx HACK */, 1);
}
/** called on MSG_TYPE_SNAPSHOT_CMPLT message received from dongle */
static void BCMFASTPATH
dhd_prot_process_snapshot_complete(dhd_pub_t *dhd, void *buf)
{
}

/** Stop protocol: sync w/dongle state. */
void dhd_prot_stop(dhd_pub_t *dhd)
{
	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
}

/* Add any protocol-specific data header.
 * Caller must reserve prot_hdrlen prepend space.
 */
void BCMFASTPATH
dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *PKTBUF)
{
	return;
}

uint
dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF)
{
	return 0;
}
#define MAX_MTU_SZ (1600u)

#define PKTBUF pktbuf

/**
 * Called when a tx ethernet packet has been dequeued from a flow queue, and has to be inserted in
 * the corresponding flow ring.
 */
int BCMFASTPATH
dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx)
{
	unsigned long flags;
	dhd_prot_t *prot = dhd->prot;
	host_txbuf_post_t *txdesc = NULL;
	dmaaddr_t pa, meta_pa;
	uint8 *pktdata;
	uint32 pktlen;
	uint32 pktid;
	uint8 prio;
	uint16 flowid = 0;
	uint16 alloced = 0;
	uint16 headroom;
	msgbuf_ring_t *ring;
	flow_ring_table_t *flow_ring_table;
	flow_ring_node_t *flow_ring_node;

	if (dhd->flow_ring_table == NULL) {
		return BCME_NORESOURCE;
	}

	flowid = DHD_PKT_GET_FLOWID(PKTBUF);
	flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
	flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];

	ring = (msgbuf_ring_t *)flow_ring_node->prot_info;

	DHD_RING_LOCK(ring->ring_lock, flags);

	/* Create a unique 32-bit packet id */
	pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, dhd->prot->pktid_tx_map,
		PKTBUF, PKTTYPE_DATA_TX);
#if defined(DHD_PCIE_PKTID)
	if (pktid == DHD_PKTID_INVALID) {
		DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
		/*
		 * If we return error here, the caller would queue the packet
		 * again. So we'll just free the skb allocated in DMA Zone.
		 * Since we have not freed the original SKB yet the caller would
		 * requeue the same.
		 */
		goto err_no_res_pktfree;
	}
#endif /* DHD_PCIE_PKTID */

	/* Reserve space in the circular buffer */
	txdesc = (host_txbuf_post_t *)
		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
	if (txdesc == NULL) {
		DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n",
			__FUNCTION__, __LINE__, OSL_ATOMIC_READ(dhd->osh, &prot->active_tx_count)));
		goto err_free_pktid;
	}

	DHD_DBG_PKT_MON_TX(dhd, PKTBUF, pktid);
#ifdef DHD_PKT_LOGGING
	DHD_PKTLOG_TX(dhd, PKTBUF, pktid);
#endif /* DHD_PKT_LOGGING */

	/* Extract the data pointer and length information */
	pktdata = PKTDATA(dhd->osh, PKTBUF);
	pktlen = PKTLEN(dhd->osh, PKTBUF);

	/* Ethernet header: Copy before we cache flush packet using DMA_MAP */
	bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN);

	/* Extract the ethernet header and adjust the data pointer and length */
	pktdata = PKTPULL(dhd->osh, PKTBUF, ETHER_HDR_LEN);
	pktlen -= ETHER_HDR_LEN;

	/* Map the data pointer to a DMA-able address */
	if (SECURE_DMA_ENAB(dhd->osh)) {
		uint offset = 0;
		BCM_REFERENCE(offset);

		if (prot->tx_metadata_offset)
			offset = prot->tx_metadata_offset + ETHER_HDR_LEN;

		pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen,
			DMA_TX, PKTBUF, 0, ring->dma_buf.secdma, offset);
	}
#ifndef BCM_SECURE_DMA
	else
		pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0);
#endif /* #ifndef BCM_SECURE_DMA */

	if (PHYSADDRISZERO(pa)) {
		DHD_ERROR(("%s: Something really bad, unless 0 is "
			"a valid phyaddr for pa\n", __FUNCTION__));
		ASSERT(0);
		goto err_rollback_idx;
	}

#ifdef DMAMAP_STATS
	dhd->dma_stats.txdata++;
	dhd->dma_stats.txdata_sz += pktlen;
#endif /* DMAMAP_STATS */
	/* No need to lock. Save the rest of the packet's metadata */
	DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_tx_map, PKTBUF, pktid,
		pa, pktlen, DMA_TX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_TX);

#ifdef TXP_FLUSH_NITEMS
	if (ring->pend_items_count == 0)
		ring->start_addr = (void *)txdesc;
	ring->pend_items_count++;
#endif

	/* Form the Tx descriptor message buffer */

	/* Common message hdr */
	txdesc->cmn_hdr.msg_type = MSG_TYPE_TX_POST;
	txdesc->cmn_hdr.if_id = ifidx;
	txdesc->cmn_hdr.flags = ring->current_phase;

	txdesc->flags = BCMPCIE_PKT_FLAGS_FRAME_802_3;
	prio = (uint8)PKTPRIO(PKTBUF);

	txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT;
	txdesc->seg_cnt = 1;

	txdesc->data_len = htol16((uint16) pktlen);
	txdesc->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
	txdesc->data_buf_addr.low_addr = htol32(PHYSADDRLO(pa));

	/* Move data pointer to keep ether header in local PKTBUF for later reference */
	PKTPUSH(dhd->osh, PKTBUF, ETHER_HDR_LEN);

	/* Handle Tx metadata */
	headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF);
	if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset))
		DHD_ERROR(("No headroom for Metadata tx %d %d\n",
			prot->tx_metadata_offset, headroom));

	if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) {
		DHD_TRACE(("Metadata in tx %d\n", prot->tx_metadata_offset));

		/* Adjust the data pointer to account for meta data in DMA_MAP */
		PKTPUSH(dhd->osh, PKTBUF, prot->tx_metadata_offset);

		if (SECURE_DMA_ENAB(dhd->osh)) {
			meta_pa = SECURE_DMA_MAP_TXMETA(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
				prot->tx_metadata_offset + ETHER_HDR_LEN, DMA_RX, PKTBUF,
				0, ring->dma_buf.secdma);
		}
#ifndef BCM_SECURE_DMA
		else
			meta_pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
				prot->tx_metadata_offset, DMA_RX, PKTBUF, 0);
#endif /* #ifndef BCM_SECURE_DMA */

		if (PHYSADDRISZERO(meta_pa)) {
			/* Unmap the data pointer to a DMA-able address */
			if (SECURE_DMA_ENAB(dhd->osh)) {
				uint offset = 0;
				BCM_REFERENCE(offset);

				if (prot->tx_metadata_offset) {
					offset = prot->tx_metadata_offset + ETHER_HDR_LEN;
				}

				SECURE_DMA_UNMAP(dhd->osh, pa, pktlen,
					DMA_TX, 0, DHD_DMAH_NULL, ring->dma_buf.secdma, offset);
			}
#ifndef BCM_SECURE_DMA
			else {
				DMA_UNMAP(dhd->osh, pa, pktlen, DMA_TX, 0, DHD_DMAH_NULL);
			}
#endif /* #ifndef BCM_SECURE_DMA */
#ifdef TXP_FLUSH_NITEMS
			/* update pend_items_count */
			ring->pend_items_count--;
#endif /* TXP_FLUSH_NITEMS */

			DHD_ERROR(("%s: Something really bad, unless 0 is "
				"a valid phyaddr for meta_pa\n", __FUNCTION__));
			ASSERT(0);
			goto err_rollback_idx;
		}

		/* Adjust the data pointer back to original value */
		PKTPULL(dhd->osh, PKTBUF, prot->tx_metadata_offset);

		txdesc->metadata_buf_len = prot->tx_metadata_offset;
		txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_pa));
		txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_pa));
	} else {
		txdesc->metadata_buf_len = htol16(0);
		txdesc->metadata_buf_addr.high_addr = 0;
		txdesc->metadata_buf_addr.low_addr = 0;
	}

#ifdef DHD_PKTID_AUDIT_RING
	DHD_PKTID_AUDIT(dhd, prot->pktid_tx_map, pktid, DHD_DUPLICATE_ALLOC);
#endif /* DHD_PKTID_AUDIT_RING */

	txdesc->cmn_hdr.request_id = htol32(pktid);

	DHD_TRACE(("txpost: data_len %d, pktid 0x%04x\n", txdesc->data_len,
		txdesc->cmn_hdr.request_id));

#ifdef DHD_LBUF_AUDIT
	PKTAUDIT(dhd->osh, PKTBUF);
#endif

	if (pktlen > MAX_MTU_SZ) {
		DHD_ERROR(("%s: ######## pktlen(%d) > MAX_MTU_SZ(%d) #######\n",
			__FUNCTION__, pktlen, MAX_MTU_SZ));
		dhd_prhex("txringitem", (volatile uchar *)txdesc,
			sizeof(host_txbuf_post_t), DHD_ERROR_VAL);
	}

	/* Update the write pointer in TCM & ring bell */
#ifdef TXP_FLUSH_NITEMS
	/* Flush if we have either hit the txp_threshold or if this msg is */
	/* occupying the last slot in the flow_ring - before wrap around. */
	if ((ring->pend_items_count == prot->txp_threshold) ||
		((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) {
		dhd_prot_txdata_write_flush(dhd, flowid);
	}
#else
	/* update ring's WR index and ring doorbell to dongle */
	dhd_prot_ring_write_complete(dhd, ring, txdesc, 1);
#endif

#ifdef TX_STATUS_LATENCY_STATS
	/* set the time when pkt is queued to flowring */
	DHD_PKT_SET_QTIME(PKTBUF, OSL_SYSUPTIME_US());
#endif /* TX_STATUS_LATENCY_STATS */

	OSL_ATOMIC_INC(dhd->osh, &prot->active_tx_count);
	/*
	 * Take a wake lock, do not sleep if we have at least one packet
	 * to finish.
	 */
	DHD_TXFL_WAKE_LOCK_TIMEOUT(dhd, MAX_TX_TIMEOUT);

	DHD_RING_UNLOCK(ring->ring_lock, flags);

	return BCME_OK;

err_rollback_idx:
	/* roll back write pointer for unprocessed message */
	if (ring->wr == 0) {
		ring->wr = ring->max_items - 1;
	} else {
		ring->wr--;
		if (ring->wr == 0) {
			DHD_INFO(("%s: flipping the phase now\n", ring->name));
			ring->current_phase = ring->current_phase ?
				0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
		}
	}

err_free_pktid:
#if defined(DHD_PCIE_PKTID)
	{
		void *dmah;
		void *secdma;
		/* Free up the PKTID. physaddr and pktlen will be garbage. */
		DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
			pa, pktlen, dmah, secdma, PKTTYPE_NO_CHECK);
	}

err_no_res_pktfree:
#endif /* DHD_PCIE_PKTID */

	DHD_RING_UNLOCK(ring->ring_lock, flags);

	return BCME_NORESOURCE;
} /* dhd_prot_txdata */
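/*
 * Batching note (TXP_FLUSH_NITEMS): rather than ringing the H2D doorbell for
 * every tx post, dhd_prot_txdata() records the first pending descriptor in
 * ring->start_addr and counts posts in ring->pend_items_count;
 * dhd_prot_txdata_write_flush() below then publishes all pending items with a
 * single WR-index update and doorbell, either when prot->txp_threshold is
 * reached or when a post lands on the last slot before the ring wraps.
 */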
/* called with a ring_lock */
/** optimization to write "n" tx items at a time to ring */
void BCMFASTPATH
dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flowid)
{
#ifdef TXP_FLUSH_NITEMS
	flow_ring_table_t *flow_ring_table;
	flow_ring_node_t *flow_ring_node;
	msgbuf_ring_t *ring;

	if (dhd->flow_ring_table == NULL) {
		return;
	}

	flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
	flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
	ring = (msgbuf_ring_t *)flow_ring_node->prot_info;

	if (ring->pend_items_count) {
		/* update ring's WR index and ring doorbell to dongle */
		dhd_prot_ring_write_complete(dhd, ring, ring->start_addr,
			ring->pend_items_count);
		ring->pend_items_count = 0;
		ring->start_addr = NULL;
	}
#endif /* TXP_FLUSH_NITEMS */
}

#undef PKTBUF	/* Only defined in the above routine */

int BCMFASTPATH
dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pkt, uchar *buf, uint *len)
{
	return 0;
}
/** post a set of receive buffers to the dongle */
static void BCMFASTPATH
dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt)
{
	dhd_prot_t *prot = dhd->prot;
#if defined(DHD_LB_RXC)
	int elem_ix;
	uint32 *elem;
	bcm_workq_t *workq;

	workq = &prot->rx_compl_prod;

	/* Produce the work item */
	elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
	if (elem_ix == BCM_RING_FULL) {
		DHD_ERROR(("%s LB RxCompl workQ is full\n", __FUNCTION__));
		return;
	}

	elem = WORKQ_ELEMENT(uint32, workq, elem_ix);
	*elem = pktid;

	/* Sync WR index to consumer if the SYNC threshold has been reached */
	if (++prot->rx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) {
		bcm_workq_prod_sync(workq);
		prot->rx_compl_prod_sync = 0;
	}

	DHD_INFO(("%s: rx_compl_prod pktid<%u> sync<%d>\n",
		__FUNCTION__, pktid, prot->rx_compl_prod_sync));
#endif /* DHD_LB_RXC */

	if (prot->rxbufpost >= rxcnt) {
		prot->rxbufpost -= (uint16)rxcnt;
	} else {
		prot->rxbufpost = 0;
	}

#if !defined(DHD_LB_RXC)
	if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD))
		dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
#endif /* !DHD_LB_RXC */
}
/* called before an ioctl is sent to the dongle */
static void
dhd_prot_wlioctl_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf)
{
	dhd_prot_t *prot = dhd->prot;
	int slen = 0;

	if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "pcie_bus_tput")) {
		pcie_bus_tput_params_t *tput_params;

		slen = strlen("pcie_bus_tput") + 1;
		tput_params = (pcie_bus_tput_params_t *)((char *)buf + slen);
		bcopy(&prot->host_bus_throughput_buf.pa, &tput_params->host_buf_addr,
			sizeof(tput_params->host_buf_addr));
		tput_params->host_buf_len = DHD_BUS_TPUT_BUF_LEN;
	}
}
/* called after an ioctl returns from dongle */
static void
dhd_prot_wl_ioctl_ret_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf,
	int ifidx, int ret, int len)
{
	if (!ret && ioc->cmd == WLC_SET_VAR && buf != NULL) {
		int slen;
		/* Intercept the wme_dp ioctl here */
		if (!strcmp(buf, "wme_dp")) {
			int val = 0;
			slen = strlen("wme_dp") + 1;
			if (len >= (int)(slen + sizeof(int)))
				bcopy(((char *)buf + slen), &val, sizeof(int));
			dhd->wme_dp = (uint8) ltoh32(val);
		}
	}
}
5929 extern bool g_pm_control
;
5930 #endif /* DHD_PM_CONTROL_FROM_FILE */
5932 /** Use protocol to issue ioctl to dongle. Only one ioctl may be in transit. */
5933 int dhd_prot_ioctl(dhd_pub_t
*dhd
, int ifidx
, wl_ioctl_t
* ioc
, void * buf
, int len
)
5938 if (dhd
->bus
->is_linkdown
) {
5939 DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n", __FUNCTION__
));
5943 if ((dhd
->busstate
== DHD_BUS_DOWN
) || dhd
->hang_was_sent
) {
5944 DHD_ERROR(("%s : bus is down. we have nothing to do -"
5945 " bus state: %d, sent hang: %d\n", __FUNCTION__
,
5946 dhd
->busstate
, dhd
->hang_was_sent
));
5950 if (dhd
->busstate
== DHD_BUS_SUSPEND
) {
5951 DHD_ERROR(("%s : bus is suspended\n", __FUNCTION__
));
5955 DHD_TRACE(("%s: Enter\n", __FUNCTION__
));
5957 if (ioc
->cmd
== WLC_SET_PM
) {
5958 #ifdef DHD_PM_CONTROL_FROM_FILE
5959 if (g_pm_control
== TRUE
) {
5960 DHD_ERROR(("%s: SET PM ignored!(Requested:%d)\n",
5961 __FUNCTION__
, buf
? *(char *)buf
: 0));
5964 #endif /* DHD_PM_CONTROL_FROM_FILE */
5965 DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__
, buf
? *(char *)buf
: 0));
5968 ASSERT(len
<= WLC_IOCTL_MAXLEN
);
5970 if (len
> WLC_IOCTL_MAXLEN
)
5975 dhd_prot_wlioctl_intercept(dhd
, ioc
, buf
);
5977 if (action
& WL_IOCTL_ACTION_SET
) {
5978 ret
= dhd_msgbuf_set_ioctl(dhd
, ifidx
, ioc
->cmd
, buf
, len
, action
);
5980 ret
= dhd_msgbuf_query_ioctl(dhd
, ifidx
, ioc
->cmd
, buf
, len
, action
);
5985 /* Too many programs assume ioctl() returns 0 on success */
5989 dhd
->dongle_error
= ret
;
5992 dhd_prot_wl_ioctl_ret_intercept(dhd
, ioc
, buf
, ifidx
, ret
, len
);
5997 } /* dhd_prot_ioctl */
/** test / loopback */
int
dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len)
{
	unsigned long flags;
	dhd_prot_t *prot = dhd->prot;
	uint16 alloced = 0;
	int i;
	uint8 *ptr;

	ioct_reqst_hdr_t *ioct_rqst;

	uint16 hdrlen = sizeof(ioct_reqst_hdr_t);
	uint16 msglen = len + hdrlen;
	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;

	msglen = ALIGN_SIZE(msglen, DMA_ALIGN_LEN);
	msglen = LIMIT_TO_MAX(msglen, MSGBUF_MAX_MSG_SIZE);

	DHD_RING_LOCK(ring->ring_lock, flags);

	ioct_rqst = (ioct_reqst_hdr_t *)
		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);

	if (ioct_rqst == NULL) {
		DHD_RING_UNLOCK(ring->ring_lock, flags);
		return 0;
	}

	ptr = (uint8 *)ioct_rqst;
	for (i = 0; i < msglen; i++) {
		ptr[i] = i % 256;
	}

	/* Common msg buf hdr */
	ioct_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
	ring->seqnum++;

	ioct_rqst->msg.msg_type = MSG_TYPE_LOOPBACK;
	ioct_rqst->msg.if_id = 0;
	ioct_rqst->msg.flags = ring->current_phase;

	bcm_print_bytes("LPBK REQ: ", (uint8 *)ioct_rqst, msglen);

	/* update ring's WR index and ring doorbell to dongle */
	dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);

	DHD_RING_UNLOCK(ring->ring_lock, flags);

	return 0;
}
/** test / loopback */
void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dmaxfer)
{
	if (dmaxfer == NULL)
		return;

	dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
	dhd_dma_buf_free(dhd, &dmaxfer->dstmem);
}
/** test / loopback */
int
dhd_prepare_schedule_dmaxfer_free(dhd_pub_t *dhdp)
{
	dhd_prot_t *prot = dhdp->prot;
	dhd_dmaxfer_t *dmaxfer = &prot->dmaxfer;
	dmaxref_mem_map_t *dmap = NULL;

	dmap = MALLOCZ(dhdp->osh, sizeof(dmaxref_mem_map_t));
	if (dmap == NULL) {
		DHD_ERROR(("%s: dmap alloc failed\n", __FUNCTION__));
		goto mem_alloc_fail;
	}
	dmap->srcmem = &(dmaxfer->srcmem);
	dmap->dstmem = &(dmaxfer->dstmem);

	DMAXFER_FREE(dhdp, dmap);
	return BCME_OK;

mem_alloc_fail:
	if (dmap) {
		MFREE(dhdp->osh, dmap, sizeof(dmaxref_mem_map_t));
		dmap = NULL;
	}
	return BCME_NOMEM;
} /* dhd_prepare_schedule_dmaxfer_free */
/** test / loopback */
void
dmaxfer_free_prev_dmaaddr(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap)
{
	dhd_dma_buf_free(dhdp, dmmap->srcmem);
	dhd_dma_buf_free(dhdp, dmmap->dstmem);

	MFREE(dhdp->osh, dmmap, sizeof(dmaxref_mem_map_t));
} /* dmaxfer_free_prev_dmaaddr */
/** test / loopback */
int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len,
	uint srcdelay, uint destdelay, dhd_dmaxfer_t *dmaxfer)
{
	uint i = 0, j = 0;

	if (!dmaxfer)
		return BCME_ERROR;

	/* First free up existing buffers */
	dmaxfer_free_dmaaddr(dhd, dmaxfer);

	if (dhd_dma_buf_alloc(dhd, &dmaxfer->srcmem, len)) {
		return BCME_NOMEM;
	}

	if (dhd_dma_buf_alloc(dhd, &dmaxfer->dstmem, len + 8)) {
		dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
		return BCME_NOMEM;
	}

	dmaxfer->len = len;

	/* Populate source with a pattern like below
	 * 0x00000000
	 * 0x01010101
	 * 0x02020202
	 * ...
	 * 0xFFFFFFFF
	 */
	while (i < dmaxfer->len) {
		((uint8 *)dmaxfer->srcmem.va)[i] = j % 256;
		i++;
		if (i % 4 == 0) {
			j++;
		}
	}

	OSL_CACHE_FLUSH(dmaxfer->srcmem.va, dmaxfer->len);

	dmaxfer->srcdelay = srcdelay;
	dmaxfer->destdelay = destdelay;

	return BCME_OK;
} /* dmaxfer_prepare_dmaaddr */
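/*
 * Illustration of the source pattern generated above: each dword carries one
 * repeated byte value that increments per dword and wraps at 256, so the
 * first bytes of srcmem read 00 00 00 00 01 01 01 01 02 02 02 02 ... A
 * shifted or truncated DMA is then easy to spot in the memcmp() performed by
 * dhd_msgbuf_dmaxfer_process().
 */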
static void
dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg)
{
	dhd_prot_t *prot = dhd->prot;
	uint64 end_usec;
	pcie_dmaxfer_cmplt_t *cmplt = (pcie_dmaxfer_cmplt_t *)msg;

	BCM_REFERENCE(cmplt);
	end_usec = OSL_SYSUPTIME_US();

	DHD_ERROR(("DMA loopback status: %d\n", cmplt->compl_hdr.status));
	prot->dmaxfer.status = cmplt->compl_hdr.status;
	OSL_CACHE_INV(prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
	if (prot->dmaxfer.srcmem.va && prot->dmaxfer.dstmem.va) {
		if (memcmp(prot->dmaxfer.srcmem.va,
			prot->dmaxfer.dstmem.va, prot->dmaxfer.len) ||
			cmplt->compl_hdr.status != BCME_OK) {
			DHD_ERROR(("DMA loopback failed\n"));
			dhd_prhex("XFER SRC: ",
				prot->dmaxfer.srcmem.va, prot->dmaxfer.len, DHD_ERROR_VAL);
			dhd_prhex("XFER DST: ",
				prot->dmaxfer.dstmem.va, prot->dmaxfer.len, DHD_ERROR_VAL);
			prot->dmaxfer.status = BCME_ERROR;
		} else {
			switch (prot->dmaxfer.d11_lpbk) {
			case M2M_DMA_LPBK: {
				DHD_ERROR(("DMA successful pcie m2m DMA loopback\n"));
				} break;
			case D11_LPBK: {
				DHD_ERROR(("DMA successful with d11 loopback\n"));
				} break;
			case BMC_LPBK: {
				DHD_ERROR(("DMA successful with bmc loopback\n"));
				} break;
			case M2M_NON_DMA_LPBK: {
				DHD_ERROR(("DMA successful pcie m2m NON DMA loopback\n"));
				} break;
			case D11_HOST_MEM_LPBK: {
				DHD_ERROR(("DMA successful d11 host mem loopback\n"));
				} break;
			case BMC_HOST_MEM_LPBK: {
				DHD_ERROR(("DMA successful bmc host mem loopback\n"));
				} break;
			default: {
				DHD_ERROR(("Invalid loopback option\n"));
				} break;
			}
		}

		if (DHD_LPBKDTDUMP_ON()) {
			/* debug info print of the Tx and Rx buffers */
			dhd_prhex("XFER SRC: ", prot->dmaxfer.srcmem.va,
				prot->dmaxfer.len, DHD_INFO_VAL);
			dhd_prhex("XFER DST: ", prot->dmaxfer.dstmem.va,
				prot->dmaxfer.len, DHD_INFO_VAL);
		}
	}

	dhd_prepare_schedule_dmaxfer_free(dhd);
	end_usec -= prot->dmaxfer.start_usec;
	if (end_usec) {
		DHD_ERROR(("DMA loopback %d bytes in %lu usec, %u kBps\n",
			prot->dmaxfer.len, (unsigned long)end_usec,
			(prot->dmaxfer.len * (1000 * 1000 / 1024) / (uint32)end_usec)));
	}
	dhd->prot->dmaxfer.in_progress = FALSE;

	dhd->bus->dmaxfer_complete = TRUE;
	dhd_os_dmaxfer_wake(dhd);
}
/** Test functionality.
 * Transfers bytes from host to dongle and to host again using DMA
 * This function is not reentrant, as prot->dmaxfer.in_progress is not protected
 * by a spinlock.
 */
int
dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay,
	uint d11_lpbk, uint core_num)
{
	unsigned long flags;
	int ret = BCME_OK;
	dhd_prot_t *prot = dhd->prot;
	pcie_dma_xfer_params_t *dmap;
	uint32 xferlen = LIMIT_TO_MAX(len, DMA_XFER_LEN_LIMIT);
	uint16 alloced = 0;
	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;

	if (prot->dmaxfer.in_progress) {
		DHD_ERROR(("DMA is in progress...\n"));
		return BCME_ERROR;
	}

	if (d11_lpbk >= MAX_LPBK) {
		DHD_ERROR(("loopback mode should be either"
			" 0-PCIE_M2M_DMA, 1-D11, 2-BMC or 3-PCIE_M2M_NonDMA\n"));
		return BCME_ERROR;
	}

	DHD_RING_LOCK(ring->ring_lock, flags);

	prot->dmaxfer.in_progress = TRUE;
	if ((ret = dmaxfer_prepare_dmaaddr(dhd, xferlen, srcdelay, destdelay,
		&prot->dmaxfer)) != BCME_OK) {
		prot->dmaxfer.in_progress = FALSE;
		DHD_RING_UNLOCK(ring->ring_lock, flags);
		return ret;
	}

	dmap = (pcie_dma_xfer_params_t *)
		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
	if (dmap == NULL) {
		dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
		prot->dmaxfer.in_progress = FALSE;
		DHD_RING_UNLOCK(ring->ring_lock, flags);
		return BCME_NOMEM;
	}

	/* Common msg buf hdr */
	dmap->cmn_hdr.msg_type = MSG_TYPE_LPBK_DMAXFER;
	dmap->cmn_hdr.request_id = htol32(DHD_FAKE_PKTID);
	dmap->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
	dmap->cmn_hdr.flags = ring->current_phase;
	ring->seqnum++;

	dmap->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.srcmem.pa));
	dmap->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.srcmem.pa));
	dmap->host_ouput_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.dstmem.pa));
	dmap->host_ouput_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.dstmem.pa));
	dmap->xfer_len = htol32(prot->dmaxfer.len);
	dmap->srcdelay = htol32(prot->dmaxfer.srcdelay);
	dmap->destdelay = htol32(prot->dmaxfer.destdelay);
	prot->dmaxfer.d11_lpbk = d11_lpbk;
	dmap->flags = (((core_num & PCIE_DMA_XFER_FLG_CORE_NUMBER_MASK)
		<< PCIE_DMA_XFER_FLG_CORE_NUMBER_SHIFT) |
		((prot->dmaxfer.d11_lpbk & PCIE_DMA_XFER_FLG_D11_LPBK_MASK)
		<< PCIE_DMA_XFER_FLG_D11_LPBK_SHIFT));
	prot->dmaxfer.start_usec = OSL_SYSUPTIME_US();

	/* update ring's WR index and ring doorbell to dongle */
	dhd_prot_ring_write_complete(dhd, ring, dmap, 1);

	DHD_RING_UNLOCK(ring->ring_lock, flags);

	DHD_ERROR(("DMA loopback Started...\n"));

	return BCME_OK;
} /* dhdmsgbuf_dmaxfer_req */
dma_xfer_status_t
dhdmsgbuf_dmaxfer_status(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;

	if (prot->dmaxfer.in_progress)
		return DMA_XFER_IN_PROGRESS;
	else if (prot->dmaxfer.status == BCME_OK)
		return DMA_XFER_SUCCESS;
	else
		return DMA_XFER_FAILED;
}
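/*
 * Typical loopback test sequence (a sketch; the iovar plumbing that invokes
 * these entry points lives outside this file):
 *
 *	dhdmsgbuf_dmaxfer_req(dhd, len, srcdelay, destdelay, d11_lpbk, core);
 *	... dongle copies srcmem to dstmem and posts a pcie_dmaxfer_cmplt_t ...
 *	dhd_msgbuf_dmaxfer_process() then compares dstmem against srcmem, and
 *	dhdmsgbuf_dmaxfer_status() reports DMA_XFER_IN_PROGRESS,
 *	DMA_XFER_SUCCESS or DMA_XFER_FAILED to the caller polling for a result.
 */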
/** Called in the process of submitting an ioctl to the dongle */
static int
dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
{
	int ret = 0;
	uint copylen = 0;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (dhd->bus->is_linkdown) {
		DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n",
			__FUNCTION__));
		return -EIO;
	}

	if (dhd->busstate == DHD_BUS_DOWN) {
		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
		return -EIO;
	}

	/* don't talk to the dongle if fw is about to be reloaded */
	if (dhd->hang_was_sent) {
		DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
			__FUNCTION__));
		return -EIO;
	}

	if (cmd == WLC_GET_VAR && buf)
	{
		if (!len || !*(uint8 *)buf) {
			DHD_ERROR(("%s(): Zero length bailing\n", __FUNCTION__));
			ret = BCME_BADARG;
			goto done;
		}

		/* Respond "bcmerror" and "bcmerrorstr" with local cache */
		copylen = MIN(len, BCME_STRLEN);

		if ((len >= strlen("bcmerrorstr")) &&
			(!strcmp((char *)buf, "bcmerrorstr"))) {
			strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), copylen);
			*(uint8 *)((uint8 *)buf + (copylen - 1)) = '\0';
			goto done;
		} else if ((len >= strlen("bcmerror")) &&
			!strcmp((char *)buf, "bcmerror")) {
			*(uint32 *)(uint32 *)buf = dhd->dongle_error;
			goto done;
		}
	}

	DHD_CTL(("query_ioctl: ACTION %d ifidx %d cmd %d len %d \n",
		action, ifidx, cmd, len));

	ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
	if (ret < 0) {
		DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
		goto done;
	}

	/* wait for IOCTL completion message from dongle and get first fragment */
	ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);

done:
	return ret;
}
static void
dhd_msgbuf_iovar_timeout_dump(dhd_pub_t *dhd)
{
	uint32 intstatus;
	dhd_prot_t *prot = dhd->prot;
	dhd->rxcnt_timeout++;
	dhd->iovar_timeout_occured = TRUE;
	DHD_ERROR(("%s: resumed on timeout rxcnt_timeout%s %d ioctl_cmd %d "
		"trans_id %d state %d busstate=%d ioctl_received=%d\n", __FUNCTION__,
		dhd->is_sched_error ? " due to scheduling problem" : "",
		dhd->rxcnt_timeout, prot->curr_ioctl_cmd, prot->ioctl_trans_id,
		prot->ioctl_state, dhd->busstate, prot->ioctl_received));
#if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
	if (dhd->is_sched_error && dhd->memdump_enabled) {
		/* change g_assert_type to trigger Kernel panic */
		g_assert_type = 2;
		/* use ASSERT() to trigger panic */
		ASSERT(0);
	}
#endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */

	if (prot->curr_ioctl_cmd == WLC_SET_VAR ||
		prot->curr_ioctl_cmd == WLC_GET_VAR) {
		char iovbuf[32];
		int i;
		int dump_size = 128;
		uint8 *ioctl_buf = (uint8 *)prot->ioctbuf.va;
		memset(iovbuf, 0, sizeof(iovbuf));
		strncpy(iovbuf, ioctl_buf, sizeof(iovbuf) - 1);
		iovbuf[sizeof(iovbuf) - 1] = '\0';
		DHD_ERROR(("Current IOVAR (%s): %s\n",
			prot->curr_ioctl_cmd == WLC_SET_VAR ?
			"WLC_SET_VAR" : "WLC_GET_VAR", iovbuf));
		DHD_ERROR(("========== START IOCTL REQBUF DUMP ==========\n"));
		for (i = 0; i < dump_size; i++) {
			DHD_ERROR(("%02X ", ioctl_buf[i]));
			if ((i % 32) == 31) {
				DHD_ERROR(("\n"));
			}
		}
		DHD_ERROR(("\n========== END IOCTL REQBUF DUMP ==========\n"));
	}

	/* Check the PCIe link status by reading intstatus register */
	intstatus = si_corereg(dhd->bus->sih,
		dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
	if (intstatus == (uint32)-1) {
		DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
		dhd->bus->is_linkdown = TRUE;
	}

	dhd_bus_dump_console_buffer(dhd->bus);
	dhd_prot_debug_info_print(dhd);
}
/**
 * Waits for IOCTL completion message from the dongle, copies this into caller
 * provided parameter 'buf'.
 */
static int
dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf)
{
	dhd_prot_t *prot = dhd->prot;
	int timeleft;
	unsigned long flags;
	int ret = 0;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (dhd_query_bus_erros(dhd)) {
		ret = -EIO;
		goto out;
	}

	timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);

#ifdef DHD_RECOVER_TIMEOUT
	if (prot->ioctl_received == 0) {
		uint32 intstatus = si_corereg(dhd->bus->sih,
			dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
		int host_irq_disbled = dhdpcie_irq_disabled(dhd->bus);
		if ((intstatus) && (intstatus != (uint32)-1) &&
			(timeleft == 0) && (!dhd_query_bus_erros(dhd))) {
			DHD_ERROR(("%s: iovar timeout trying again intstatus=%x"
				" host_irq_disabled=%d\n",
				__FUNCTION__, intstatus, host_irq_disbled));
			dhd_pcie_intr_count_dump(dhd);
			dhd_print_tasklet_status(dhd);
			dhd_prot_process_ctrlbuf(dhd);
			timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
			/* Clear Interrupts */
			dhdpcie_bus_clear_intstatus(dhd->bus);
		}
	}
#endif /* DHD_RECOVER_TIMEOUT */

	if (timeleft == 0 && (!dhd_query_bus_erros(dhd))) {
		/* check if resumed on time out related to scheduling issue */
		dhd->is_sched_error = FALSE;
		if (dhd->bus->isr_entry_time > prot->ioctl_fillup_time) {
			dhd->is_sched_error = dhd_bus_query_dpc_sched_errors(dhd);
		}

		dhd_msgbuf_iovar_timeout_dump(dhd);

#ifdef DHD_FW_COREDUMP
		/* Collect socram dump */
		if (dhd->memdump_enabled) {
			/* collect core dump */
			dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT;
			dhd_bus_mem_dump(dhd);
		}
#endif /* DHD_FW_COREDUMP */
#ifdef SUPPORT_LINKDOWN_RECOVERY
#ifdef CONFIG_ARCH_MSM
		dhd->bus->no_cfg_restore = 1;
#endif /* CONFIG_ARCH_MSM */
#endif /* SUPPORT_LINKDOWN_RECOVERY */
		ret = -ETIMEDOUT;
		goto out;
	} else {
		if (prot->ioctl_received != IOCTL_RETURN_ON_SUCCESS) {
			DHD_ERROR(("%s: IOCTL failure due to ioctl_received = %d\n",
				__FUNCTION__, prot->ioctl_received));
			ret = -EINVAL;
			goto out;
		}
		dhd->rxcnt_timeout = 0;
		DHD_CTL(("%s: ioctl resp resumed, got %d\n",
			__FUNCTION__, prot->ioctl_resplen));
	}

	if (dhd->prot->ioctl_resplen > len)
		dhd->prot->ioctl_resplen = (uint16)len;
	if (buf)
		bcopy(dhd->prot->retbuf.va, buf, dhd->prot->ioctl_resplen);

	ret = (int)(dhd->prot->ioctl_status);

out:
	DHD_GENERAL_LOCK(dhd, flags);
	dhd->prot->ioctl_state = 0;
	dhd->prot->ioctl_resplen = 0;
	dhd->prot->ioctl_received = IOCTL_WAIT;
	dhd->prot->curr_ioctl_cmd = 0;
	DHD_GENERAL_UNLOCK(dhd, flags);

	return ret;
} /* dhd_msgbuf_wait_ioctl_cmplt */
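/*
 * IOCTL handshake summary (as implemented above and in dhd_fillup_ioct_reqst()
 * further below): submitting a request sets prot->ioctl_state to
 * MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING and rings the control
 * submission ring; this function then blocks in dhd_os_ioctl_resp_wait() until
 * the D2H completion path flips prot->ioctl_received to
 * IOCTL_RETURN_ON_SUCCESS, after which the response is copied out of
 * prot->retbuf and all ioctl state is reset under DHD_GENERAL_LOCK.
 */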
static int
dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
{
	int ret = 0;

	DHD_TRACE(("%s: Enter \n", __FUNCTION__));

	if (dhd->bus->is_linkdown) {
		DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n",
			__FUNCTION__));
		return -EIO;
	}

	if (dhd->busstate == DHD_BUS_DOWN) {
		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
		return -EIO;
	}

	/* don't talk to the dongle if fw is about to be reloaded */
	if (dhd->hang_was_sent) {
		DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
			__FUNCTION__));
		return -EIO;
	}

	DHD_CTL(("ACTION %d ifidx %d cmd %d len %d \n",
		action, ifidx, cmd, len));

	/* Fill up msgbuf for ioctl req */
	ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
	if (ret < 0) {
		DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
		goto done;
	}

	/* wait for IOCTL completion message from dongle and get first fragment */
	ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);

done:
	return ret;
}
/** Called by upper DHD layer. Handles a protocol control response asynchronously. */
int dhd_prot_ctl_complete(dhd_pub_t *dhd)
{
	return 0;
}

/** Called by upper DHD layer. Check for and handle local prot-specific iovar commands */
int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name,
	void *params, int plen, void *arg, int len, bool set)
{
	return BCME_UNSUPPORTED;
}
#ifdef DHD_DUMP_PCIE_RINGS
int dhd_d2h_h2d_ring_dump(dhd_pub_t *dhd, void *file, unsigned long *file_posn)
{
	dhd_prot_t *prot = dhd->prot;
	msgbuf_ring_t *ring;
	int ret = 0;

	ring = &prot->h2dring_ctrl_subn;
	if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0)
		goto exit;

	ring = &prot->d2hring_ctrl_cpln;
	if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0)
		goto exit;

	ring = prot->h2dring_info_subn;
	if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0)
		goto exit;

	ring = prot->d2hring_info_cpln;
	if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0)
		goto exit;

	ring = &prot->d2hring_tx_cpln;
	if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0)
		goto exit;

	ring = &prot->d2hring_rx_cpln;
	if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0)
		goto exit;

	ring = prot->h2d_flowrings_pool;
	if ((ret = dhd_ring_write(dhd, ring, file, file_posn)) < 0)
		goto exit;

exit:
	return ret;
}

/* Writes to file in TLV format */
int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, unsigned long *file_posn)
{
	unsigned long flags;
	uint16 loc_rd = 0;
	uint32 len = 0;
	int ret = 0;
	cmn_msg_hdr_t *msg;
	uint8 *msg_addr;

	while (len < ((ring->max_items) * (ring->item_len))) {
		DHD_RING_LOCK(ring->ring_lock, flags);
		msg_addr = (uint8 *)ring->dma_buf.va + (loc_rd * ring->item_len);
		ASSERT(loc_rd < ring->max_items);
		DHD_RING_UNLOCK(ring->ring_lock, flags);

		if (msg_addr == NULL) {
			break;
		}

		msg = (cmn_msg_hdr_t *)msg_addr;

		ret = dhd_os_write_file_posn(file, file_posn, (char *)(&(msg->msg_type)),
			sizeof(msg->msg_type));
		if (ret < 0) {
			DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
			return ret;
		}
		ret = dhd_os_write_file_posn(file, file_posn, (char *)(&(ring->item_len)),
			sizeof(ring->item_len));
		if (ret < 0) {
			DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
			return ret;
		}
		ret = dhd_os_write_file_posn(file, file_posn, (char *)msg_addr, ring->item_len);
		if (ret < 0) {
			DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
			return ret;
		}
		loc_rd++;
		len += ring->item_len;
	}
	return ret;
}
#endif /* DHD_DUMP_PCIE_RINGS */
/** Add prot dump output to a buffer */
void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
{
	if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM)
		bcm_bprintf(b, "\nd2h_sync: SEQNUM:");
	else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM)
		bcm_bprintf(b, "\nd2h_sync: XORCSUM:");
	else
		bcm_bprintf(b, "\nd2h_sync: NONE:");
	bcm_bprintf(b, " d2h_sync_wait max<%lu> tot<%lu>\n",
		dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot);

	bcm_bprintf(b, "\nDongle DMA Indices: h2d %d d2h %d index size %d bytes\n",
		dhd->dma_h2d_ring_upd_support,
		dhd->dma_d2h_ring_upd_support,
		dhd->prot->rw_index_sz);
	bcm_bprintf(b, "h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
		h2d_max_txpost, dhd->prot->h2d_max_txpost);
}
/* Update local copy of dongle statistics */
void dhd_prot_dstats(dhd_pub_t *dhd)
{
	return;
}

/** Called by upper DHD layer */
int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf,
	uint reorder_info_len, void **pkt, uint32 *free_buf_count)
{
	return 0;
}
/** Debug related, post a dummy message to interrupt dongle. Used to process cons commands. */
int
dhd_post_dummy_msg(dhd_pub_t *dhd)
{
	unsigned long flags;
	hostevent_hdr_t *hevent = NULL;
	uint16 alloced = 0;

	dhd_prot_t *prot = dhd->prot;
	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;

	DHD_RING_LOCK(ring->ring_lock, flags);

	hevent = (hostevent_hdr_t *)
		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);

	if (hevent == NULL) {
		DHD_RING_UNLOCK(ring->ring_lock, flags);
		return -1;
	}

	/* CMN msg header */
	hevent->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
	ring->seqnum++;
	hevent->msg.msg_type = MSG_TYPE_HOST_EVNT;
	hevent->msg.if_id = 0;
	hevent->msg.flags = ring->current_phase;

	/* Event payload */
	hevent->evnt_pyld = htol32(HOST_EVENT_CONS_CMD);

	/* Since, we are filling the data directly into the bufptr obtained
	 * from the msgbuf, we can directly call the write_complete
	 */
	dhd_prot_ring_write_complete(dhd, ring, hevent, 1);

	DHD_RING_UNLOCK(ring->ring_lock, flags);

	return 0;
}
/**
 * If exactly_nitems is true, this function will allocate space for nitems or fail
 * If exactly_nitems is false, this function will allocate space for nitems or less
 */
static void * BCMFASTPATH
dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
	uint16 nitems, uint16 * alloced, bool exactly_nitems)
{
	void *ret_buf;

	/* Alloc space for nitems in the ring */
	ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);

	if (ret_buf == NULL) {
		/* if alloc failed, invalidate cached read ptr */
		if (dhd->dma_d2h_ring_upd_support) {
			ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
		} else {
			dhd_bus_cmn_readshared(dhd->bus, &(ring->rd), RING_RD_UPD, ring->idx);
#ifdef SUPPORT_LINKDOWN_RECOVERY
			/* Check if ring->rd is valid */
			if (ring->rd >= ring->max_items) {
				DHD_ERROR(("%s: Invalid rd idx=%d\n", ring->name, ring->rd));
				dhd->bus->read_shm_fail = TRUE;
				return NULL;
			}
#endif /* SUPPORT_LINKDOWN_RECOVERY */
		}

		/* Try allocating once more */
		ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);

		if (ret_buf == NULL) {
			DHD_INFO(("%s: Ring space not available \n", ring->name));
			return NULL;
		}
	}

	if (ret_buf == HOST_RING_BASE(ring)) {
		DHD_INFO(("%s: setting the phase now\n", ring->name));
		ring->current_phase = ring->current_phase ? 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
	}

	/* Return alloced space */
	return ret_buf;
}
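/*
 * Canonical producer pattern built on dhd_prot_alloc_ring_space(), used by
 * most H2D submissions in this file (a minimal sketch, not a new API):
 *
 *	DHD_RING_LOCK(ring->ring_lock, flags);
 *	msg = dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
 *	if (msg == NULL) {
 *		DHD_RING_UNLOCK(ring->ring_lock, flags);
 *		return BCME_NORESOURCE;
 *	}
 *	msg->epoch = ring->seqnum % H2D_EPOCH_MODULO;
 *	ring->seqnum++;
 *	... fill in the remaining message fields ...
 *	dhd_prot_ring_write_complete(dhd, ring, msg, 1);
 *	DHD_RING_UNLOCK(ring->ring_lock, flags);
 */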
/**
 * Non inline ioctl request.
 * Form an ioctl request first as per ioctptr_reqst_hdr_t header in the circular buffer
 * Form a separate request buffer where a 4 byte cmn header is added in the front
 * buf contents from parent function is copied to remaining section of this buffer
 */
static int
dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx)
{
	dhd_prot_t *prot = dhd->prot;
	ioctl_req_msg_t *ioct_rqst;
	void * ioct_buf;	/* For ioctl payload */
	uint16 rqstlen, resplen;
	unsigned long flags;
	uint16 alloced = 0;
	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;

	if (dhd_query_bus_erros(dhd)) {
		return -EIO;
	}

	rqstlen = len;
	resplen = len;

	/* Limit ioct request to MSGBUF_MAX_MSG_SIZE bytes including hdrs */
	/* 8K allocation of dongle buffer fails */
	/* dhd doesn't give separate input & output buf lens */
	/* so making the assumption that input length can never be more than 2k */
	rqstlen = MIN(rqstlen, MSGBUF_IOCTL_MAX_RQSTLEN);

	DHD_RING_LOCK(ring->ring_lock, flags);

	if (prot->ioctl_state) {
		DHD_ERROR(("%s: pending ioctl %02x\n", __FUNCTION__, prot->ioctl_state));
		DHD_RING_UNLOCK(ring->ring_lock, flags);
		return BCME_BUSY;
	} else {
		prot->ioctl_state = MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING;
	}

	/* Request for cbuf space */
	ioct_rqst = (ioctl_req_msg_t *)
		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
	if (ioct_rqst == NULL) {
		DHD_ERROR(("couldn't allocate space on msgring to send ioctl request\n"));
		prot->ioctl_state = 0;
		prot->curr_ioctl_cmd = 0;
		prot->ioctl_received = IOCTL_WAIT;
		DHD_RING_UNLOCK(ring->ring_lock, flags);
		return -1;
	}

	/* Common msg buf hdr */
	ioct_rqst->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ;
	ioct_rqst->cmn_hdr.if_id = (uint8)ifidx;
	ioct_rqst->cmn_hdr.flags = ring->current_phase;
	ioct_rqst->cmn_hdr.request_id = htol32(DHD_IOCTL_REQ_PKTID);
	ioct_rqst->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
	ring->seqnum++;

	ioct_rqst->cmd = htol32(cmd);
	prot->curr_ioctl_cmd = cmd;
	ioct_rqst->output_buf_len = htol16(resplen);
	prot->ioctl_trans_id++;
	ioct_rqst->trans_id = prot->ioctl_trans_id;

	/* populate ioctl buffer info */
	ioct_rqst->input_buf_len = htol16(rqstlen);
	ioct_rqst->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->ioctbuf.pa));
	ioct_rqst->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->ioctbuf.pa));
	/* copy ioct payload */
	ioct_buf = (void *) prot->ioctbuf.va;

	prot->ioctl_fillup_time = OSL_LOCALTIME_NS();

	if (buf)
		memcpy(ioct_buf, buf, len);

	OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, len);

	if (!ISALIGNED(ioct_buf, DMA_ALIGN_LEN))
		DHD_ERROR(("host ioct address unaligned !!!!! \n"));

	DHD_CTL(("submitted IOCTL request request_id %d, cmd %d, output_buf_len %d, tx_id %d\n",
		ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len,
		ioct_rqst->trans_id));

	/* update ring's WR index and ring doorbell to dongle */
	dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);

	DHD_RING_UNLOCK(ring->ring_lock, flags);

	return 0;
} /* dhd_fillup_ioct_reqst */
/**
 * dhd_prot_ring_attach - Initialize the msgbuf_ring object and attach a
 * DMA-able buffer to it. The ring is NOT tagged as inited until all the ring
 * information is posted to the dongle.
 *
 * Invoked in dhd_prot_attach for the common rings, and in dhd_prot_init for
 * each flowring in pool of flowrings.
 *
 * returns BCME_OK=0 on success
 * returns non-zero negative error value on failure.
 */
static int
dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring, const char *name,
	uint16 max_items, uint16 item_len, uint16 ringid)
{
	int dma_buf_alloced = BCME_NOMEM;
	uint32 dma_buf_len = max_items * item_len;
	dhd_prot_t *prot = dhd->prot;
	uint16 max_flowrings = dhd->bus->max_tx_flowrings;

	ASSERT((max_items < 0xFFFF) && (item_len < 0xFFFF) && (ringid < 0xFFFF));

	strncpy(ring->name, name, RING_NAME_MAX_LENGTH);
	ring->name[RING_NAME_MAX_LENGTH - 1] = '\0';

	ring->idx = ringid;

	ring->max_items = max_items;
	ring->item_len = item_len;

	/* A contiguous space may be reserved for all flowrings */
	if (DHD_IS_FLOWRING(ringid, max_flowrings) && (prot->flowrings_dma_buf.va)) {
		/* Carve out from the contiguous DMA-able flowring buffer */
		uint16 flowid;
		uint32 base_offset;

		dhd_dma_buf_t *dma_buf = &ring->dma_buf;
		dhd_dma_buf_t *rsv_buf = &prot->flowrings_dma_buf;

		flowid = DHD_RINGID_TO_FLOWID(ringid);
		base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len;

		ASSERT(base_offset + dma_buf_len <= rsv_buf->len);

		dma_buf->len = dma_buf_len;
		dma_buf->va = (void *)((uintptr)rsv_buf->va + base_offset);
		PHYSADDRHISET(dma_buf->pa, PHYSADDRHI(rsv_buf->pa));
		PHYSADDRLOSET(dma_buf->pa, PHYSADDRLO(rsv_buf->pa) + base_offset);

		/* On 64bit, contiguous space may not span across 0x00000000FFFFFFFF */
		ASSERT(PHYSADDRLO(dma_buf->pa) >= PHYSADDRLO(rsv_buf->pa));

		dma_buf->dmah = rsv_buf->dmah;
		dma_buf->secdma = rsv_buf->secdma;

		(void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
	} else {
		/* Allocate a dhd_dma_buf */
		dma_buf_alloced = dhd_dma_buf_alloc(dhd, &ring->dma_buf, dma_buf_len);
		if (dma_buf_alloced != BCME_OK) {
			return BCME_NOMEM;
		}
	}

	/* CAUTION: Save ring::base_addr in little endian format! */
	dhd_base_addr_htolpa(&ring->base_addr, ring->dma_buf.pa);

#ifdef BCM_SECURE_DMA
	if (SECURE_DMA_ENAB(prot->osh)) {
		ring->dma_buf.secdma = MALLOCZ(prot->osh, sizeof(sec_cma_info_t));
		if (ring->dma_buf.secdma == NULL) {
			goto free_dma_buf;
		}
	}
#endif /* BCM_SECURE_DMA */

	ring->ring_lock = dhd_os_spin_lock_init(dhd->osh);

	DHD_INFO(("RING_ATTACH : %s Max item %d len item %d total size %d "
		"ring start %p buf phys addr %x:%x \n",
		ring->name, ring->max_items, ring->item_len,
		dma_buf_len, ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
		ltoh32(ring->base_addr.low_addr)));

	return BCME_OK;

#ifdef BCM_SECURE_DMA
free_dma_buf:
	if (dma_buf_alloced == BCME_OK) {
		dhd_dma_buf_free(dhd, &ring->dma_buf);
	}
#endif /* BCM_SECURE_DMA */

	return BCME_NOMEM;
} /* dhd_prot_ring_attach */
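/*
 * Worked example of the carve-out above, with illustrative numbers: for a
 * flowring, dma_buf_len = max_items * item_len, and the ring for 'flowid'
 * occupies bytes [base_offset, base_offset + dma_buf_len) of
 * prot->flowrings_dma_buf, where
 *	base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len;
 * consecutive flowids therefore map to back-to-back dma_buf_len-sized slices
 * of the one contiguous reservation.
 */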
/**
 * dhd_prot_ring_init - Post the common ring information to dongle.
 *
 * Used only for common rings.
 *
 * The flowrings information is passed via the create flowring control message
 * (tx_flowring_create_request_t) sent over the H2D control submission common
 * ring.
 */
static void
dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring)
{
	ring->wr = 0;
	ring->rd = 0;

	/* CAUTION: ring::base_addr already in Little Endian */
	dhd_bus_cmn_writeshared(dhd->bus, &ring->base_addr,
		sizeof(sh_addr_t), RING_BUF_ADDR, ring->idx);
	dhd_bus_cmn_writeshared(dhd->bus, &ring->max_items,
		sizeof(uint16), RING_MAX_ITEMS, ring->idx);
	dhd_bus_cmn_writeshared(dhd->bus, &ring->item_len,
		sizeof(uint16), RING_ITEM_LEN, ring->idx);

	dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
		sizeof(uint16), RING_WR_UPD, ring->idx);
	dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
		sizeof(uint16), RING_RD_UPD, ring->idx);

	/* ring inited */
	ring->inited = TRUE;
} /* dhd_prot_ring_init */
/**
 * dhd_prot_ring_reset - bzero a ring's DMA-able buffer and cache flush
 * Reset WR and RD indices to 0.
 */
static void
dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring)
{
	DHD_TRACE(("%s\n", __FUNCTION__));

	dhd_dma_buf_reset(dhd, &ring->dma_buf);

	ring->rd = ring->wr = 0;

	ring->inited = FALSE;
	ring->create_pending = FALSE;
}
/**
 * dhd_prot_ring_detach - Detach the DMA-able buffer and any other objects
 * hanging off the msgbuf_ring.
 */
static void
dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring)
{
	dhd_prot_t *prot = dhd->prot;
	uint16 max_flowrings = dhd->bus->max_tx_flowrings;

	ring->inited = FALSE;
	/* rd = ~0, wr = ring->rd - 1, max_items = 0, len_item = ~0 */

#ifdef BCM_SECURE_DMA
	if (SECURE_DMA_ENAB(prot->osh)) {
		if (ring->dma_buf.secdma) {
			SECURE_DMA_UNMAP_ALL(prot->osh, ring->dma_buf.secdma);
			MFREE(prot->osh, ring->dma_buf.secdma, sizeof(sec_cma_info_t));
			ring->dma_buf.secdma = NULL;
		}
	}
#endif /* BCM_SECURE_DMA */

	/* If the DMA-able buffer was carved out of a pre-reserved contiguous
	 * memory, then simply stop using it.
	 */
	if (DHD_IS_FLOWRING(ring->idx, max_flowrings) && (prot->flowrings_dma_buf.va)) {
		(void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
		memset(&ring->dma_buf, 0, sizeof(dhd_dma_buf_t));
	} else {
		dhd_dma_buf_free(dhd, &ring->dma_buf);
	}

	dhd_os_spin_lock_deinit(dhd->osh, ring->ring_lock);
} /* dhd_prot_ring_detach */
/*
 * +----------------------------------------------------------------------------
 * Flowring Pool
 *
 * Unlike common rings, which are attached very early on (dhd_prot_attach),
 * flowrings are dynamically instantiated. Moreover, flowrings may require a
 * larger DMA-able buffer. To avoid issues with fragmented cache coherent
 * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once.
 * The DMA-able buffers are attached to these pre-allocated msgbuf_ring.
 *
 * Each DMA-able buffer may be allocated independently, or may be carved out
 * of a single large contiguous region that is registered with the protocol
 * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region
 * may not span 0x00000000FFFFFFFF (avoid dongle side 64bit ptr arithmetic).
 *
 * No flowring pool action is performed in dhd_prot_attach(), as the number
 * of h2d rings is not yet known.
 *
 * In dhd_prot_init(), the dongle advertised number of h2d rings is used to
 * determine the number of flowrings required, and a pool of msgbuf_rings are
 * allocated and a DMA-able buffer (carved or allocated) is attached.
 * See: dhd_prot_flowrings_pool_attach()
 *
 * A flowring msgbuf_ring object may be fetched from this pool during flowring
 * creation, using the flowid. Likewise, flowrings may be freed back into the
 * pool on flowring deletion.
 * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release()
 *
 * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers
 * are detached (returned back to the carved region or freed), and the pool of
 * msgbuf_ring and any objects allocated against it are freed.
 * See: dhd_prot_flowrings_pool_detach()
 *
 * In dhd_prot_reset(), the flowring pool is simply reset by returning it to a
 * state as-if upon an attach. All DMA-able buffers are retained.
 * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring
 * pool attach will notice that the pool persists and continue to use it. This
 * will avoid the case of a fragmented DMA-able region.
 *
 * +----------------------------------------------------------------------------
 */

/* Conversion of a flowid to a flowring pool index */
#define DHD_FLOWRINGS_POOL_OFFSET(flowid) \
	((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)

/* Fetch the msgbuf_ring_t from the flowring pool given a flowid */
#define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \
	(msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + \
		DHD_FLOWRINGS_POOL_OFFSET(flowid)

/* Traverse each flowring in the flowring pool, assigning ring and flowid */
#define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) \
	for ((flowid) = DHD_FLOWRING_START_FLOWID, \
		(ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \
		(flowid) < ((total_flowrings) + DHD_FLOWRING_START_FLOWID); \
		(ring)++, (flowid)++)
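/*
 * Usage sketch for the iterator above (see the pool attach/reset/detach
 * routines below for the real call sites):
 *
 *	msgbuf_ring_t *ring;
 *	uint16 flowid;
 *	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) {
 *		... operate on flowring 'ring' identified by 'flowid' ...
 *	}
 */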
/* Fetch number of H2D flowrings given the total number of h2d rings */
static uint16
dhd_get_max_flow_rings(dhd_pub_t *dhd)
{
	if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
		return dhd->bus->max_tx_flowrings;
	else
		return (dhd->bus->max_tx_flowrings - BCMPCIE_H2D_COMMON_MSGRINGS);
}
/**
 * dhd_prot_flowrings_pool_attach - Initialize a pool of flowring msgbuf_ring_t.
 *
 * Allocate a pool of msgbuf_ring along with DMA-able buffers for flowrings.
 * Dongle includes common rings when it advertises the number of H2D rings.
 * Allocates a pool of msgbuf_ring_t and invokes dhd_prot_ring_attach to
 * allocate the DMA-able buffer and initialize each msgbuf_ring_t object.
 *
 * dhd_prot_ring_attach is invoked to perform the actual initialization and
 * attaching the DMA-able buffer.
 *
 * Later dhd_prot_flowrings_pool_fetch() may be used to fetch a preallocated and
 * initialized msgbuf_ring_t object.
 *
 * returns BCME_OK=0 on success
 * returns non-zero negative error value on failure.
 */
static int
dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd)
{
	uint16 flowid;
	msgbuf_ring_t *ring;
	uint16 h2d_flowrings_total; /* exclude H2D common rings */
	dhd_prot_t *prot = dhd->prot;
	char ring_name[RING_NAME_MAX_LENGTH];

	if (prot->h2d_flowrings_pool != NULL)
		return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */

	ASSERT(prot->h2d_rings_total == 0);

	/* h2d_rings_total includes H2D common rings: ctrl and rxbuf subn */
	prot->h2d_rings_total = (uint16)dhd_bus_max_h2d_queues(dhd->bus);

	if (prot->h2d_rings_total < BCMPCIE_H2D_COMMON_MSGRINGS) {
		DHD_ERROR(("%s: h2d_rings_total advertised as %u\n",
			__FUNCTION__, prot->h2d_rings_total));
		return BCME_ERROR;
	}

	/* Subtract number of H2D common rings, to determine number of flowrings */
	h2d_flowrings_total = dhd_get_max_flow_rings(dhd);

	DHD_ERROR(("Attach flowrings pool for %d rings\n", h2d_flowrings_total));

	/* Allocate pool of msgbuf_ring_t objects for all flowrings */
	prot->h2d_flowrings_pool = (msgbuf_ring_t *)MALLOCZ(prot->osh,
		(h2d_flowrings_total * sizeof(msgbuf_ring_t)));

	if (prot->h2d_flowrings_pool == NULL) {
		DHD_ERROR(("%s: flowrings pool for %d flowrings, alloc failure\n",
			__FUNCTION__, h2d_flowrings_total));
		goto fail;
	}

	/* Setup & Attach a DMA-able buffer to each flowring in the flowring pool */
	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
		snprintf(ring_name, sizeof(ring_name), "h2dflr_%03u", flowid);
		if (dhd_prot_ring_attach(dhd, ring, ring_name,
			prot->h2d_max_txpost, H2DRING_TXPOST_ITEMSIZE,
			DHD_FLOWID_TO_RINGID(flowid)) != BCME_OK) {
			goto attach_fail;
		}
	}

	return BCME_OK;

attach_fail:
	dhd_prot_flowrings_pool_detach(dhd); /* Free entire pool of flowrings */

fail:
	prot->h2d_rings_total = 0;
	return BCME_NOMEM;
} /* dhd_prot_flowrings_pool_attach */
/**
 * dhd_prot_flowrings_pool_reset - Reset all msgbuf_ring_t objects in the pool.
 * Invokes dhd_prot_ring_reset to perform the actual reset.
 *
 * The DMA-able buffer is not freed during reset and neither is the flowring
 * pool freed.
 *
 * dhd_prot_flowrings_pool_reset will be invoked in dhd_prot_reset. Following
 * the dhd_prot_reset, dhd_prot_init will be re-invoked, and the flowring pool
 * from a previous flowring pool instantiation will be reused.
 *
 * This will avoid a fragmented DMA-able memory condition, if multiple
 * dhd_prot_reset were invoked to reboot the dongle without a full detach/attach
 * cycle.
 */
static void
dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd)
{
	uint16 flowid, h2d_flowrings_total;
	msgbuf_ring_t *ring;
	dhd_prot_t *prot = dhd->prot;

	if (prot->h2d_flowrings_pool == NULL) {
		ASSERT(prot->h2d_rings_total == 0);
		return;
	}

	h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
	/* Reset each flowring in the flowring pool */
	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
		dhd_prot_ring_reset(dhd, ring);
		ring->inited = FALSE;
	}

	/* Flowring pool state must be as-if dhd_prot_flowrings_pool_attach */
}
/**
 * dhd_prot_flowrings_pool_detach - Free pool of msgbuf_ring along with
 * DMA-able buffers for flowrings.
 * dhd_prot_ring_detach is invoked to free the DMA-able buffer and perform any
 * de-initialization of each msgbuf_ring_t.
 */
static void
dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd)
{
	uint16 flowid;
	msgbuf_ring_t *ring;
	uint16 h2d_flowrings_total; /* exclude H2D common rings */
	dhd_prot_t *prot = dhd->prot;

	if (prot->h2d_flowrings_pool == NULL) {
		ASSERT(prot->h2d_rings_total == 0);
		return;
	}

	h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
	/* Detach the DMA-able buffer for each flowring in the flowring pool */
	FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
		dhd_prot_ring_detach(dhd, ring);
	}

	MFREE(prot->osh, prot->h2d_flowrings_pool,
		(h2d_flowrings_total * sizeof(msgbuf_ring_t)));

	prot->h2d_flowrings_pool = (msgbuf_ring_t *)NULL;
	prot->h2d_rings_total = 0;
} /* dhd_prot_flowrings_pool_detach */
/**
 * dhd_prot_flowrings_pool_fetch - Fetch a preallocated and initialized
 * msgbuf_ring from the flowring pool, and assign it.
 *
 * Unlike common rings, which uses a dhd_prot_ring_init() to pass the common
 * ring information to the dongle, a flowring's information is passed via a
 * flowring create control message.
 *
 * Only the ring state (WR, RD) index are initialized.
 */
static msgbuf_ring_t *
dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd, uint16 flowid)
{
	msgbuf_ring_t *ring;
	dhd_prot_t *prot = dhd->prot;

	ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
	ASSERT(flowid < prot->h2d_rings_total);
	ASSERT(prot->h2d_flowrings_pool != NULL);

	ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);

	/* ASSERT flow_ring->inited == FALSE */

	ring->wr = 0;
	ring->rd = 0;
	ring->inited = TRUE;
	/*
	 * Every time a flowring starts dynamically, initialize current_phase with 0
	 * then flip to BCMPCIE_CMNHDR_PHASE_BIT_INIT
	 */
	ring->current_phase = 0;
	return ring;
}
/**
 * dhd_prot_flowrings_pool_release - release a previously fetched flowring's
 * msgbuf_ring back to the flow_ring pool.
 */
static void
dhd_prot_flowrings_pool_release(dhd_pub_t *dhd, uint16 flowid, void *flow_ring)
{
	msgbuf_ring_t *ring;
	dhd_prot_t *prot = dhd->prot;

	ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
	ASSERT(flowid < prot->h2d_rings_total);
	ASSERT(prot->h2d_flowrings_pool != NULL);

	ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);

	ASSERT(ring == (msgbuf_ring_t *)flow_ring);
	/* ASSERT flow_ring->inited == TRUE */

	(void)dhd_dma_buf_audit(dhd, &ring->dma_buf);

	ring->wr = 0;
	ring->rd = 0;
	ring->inited = FALSE;
}
/* Assumes only one index is updated at a time */
/* If exactly_nitems is true, this function will allocate space for nitems or fail */
/* Exception: when wrap around is encountered, to prevent hangup (last nitems of ring buffer) */
/* If exactly_nitems is false, this function will allocate space for nitems or less */
static void *BCMFASTPATH
dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced,
	bool exactly_nitems)
{
	void *ret_ptr = NULL;
	uint16 ring_avail_cnt;

	ASSERT(nitems <= ring->max_items);

	ring_avail_cnt = CHECK_WRITE_SPACE(ring->rd, ring->wr, ring->max_items);

	if ((ring_avail_cnt == 0) ||
		(exactly_nitems && (ring_avail_cnt < nitems) &&
		((ring->max_items - ring->wr) >= nitems))) {
		DHD_INFO(("Space not available: ring %s items %d write %d read %d\n",
			ring->name, nitems, ring->wr, ring->rd));
		return NULL;
	}

	*alloced = MIN(nitems, ring_avail_cnt);

	/* Return next available space */
	ret_ptr = (char *)DHD_RING_BGN_VA(ring) + (ring->wr * ring->item_len);

	/* Update write index */
	if ((ring->wr + *alloced) == ring->max_items)
		ring->wr = 0;
	else if ((ring->wr + *alloced) < ring->max_items)
		ring->wr += *alloced;
	else {
		/* Should never hit this */
		ASSERT(0);
		return NULL;
	}

	return ret_ptr;
} /* dhd_prot_get_ring_space */
/**
 * dhd_prot_ring_write_complete - Host updates the new WR index on producing
 * new messages in a H2D ring. The messages are flushed from cache prior to
 * posting the new WR index. The new WR index will be updated in the DMA index
 * array or directly in the dongle's ring state memory.
 * A PCIE doorbell will be generated to wake up the dongle.
 * This is a non-atomic function, make sure the callers
 * always hold appropriate locks.
 */
static void BCMFASTPATH
__dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
	uint16 nitems)
{
	dhd_prot_t *prot = dhd->prot;
	uint32 db_index;
	uint16 max_flowrings = dhd->bus->max_tx_flowrings;
	uint corerev;

	/* cache flush */
	OSL_CACHE_FLUSH(p, ring->item_len * nitems);

	if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
		dhd_prot_dma_indx_set(dhd, ring->wr,
			H2D_DMA_INDX_WR_UPD, ring->idx);
	} else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings)) {
		dhd_prot_dma_indx_set(dhd, ring->wr,
			H2D_IFRM_INDX_WR_UPD, ring->idx);
	} else {
		dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
			sizeof(uint16), RING_WR_UPD, ring->idx);
	}

	/* raise h2d interrupt */
	if (IDMA_ACTIVE(dhd) ||
		(IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings))) {
		db_index = IDMA_IDX0;
		/* this api is called in wl down path..in that case sih is freed already */
		if (dhd->bus->sih) {
			corerev = dhd->bus->sih->buscorerev;
			/* We need to explicitly configure the type of DMA for core rev >= 24 */
			if (corerev >= 24) {
				db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
			}
		}
		prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
	} else {
		prot->mb_ring_fn(dhd->bus, ring->wr);
	}
}
static void BCMFASTPATH
dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
	uint16 nitems)
{
	unsigned long flags_bus;

	DHD_BUS_LOCK(dhd->bus->bus_lock, flags_bus);
	__dhd_prot_ring_write_complete(dhd, ring, p, nitems);
	DHD_BUS_UNLOCK(dhd->bus->bus_lock, flags_bus);
}
/**
 * dhd_prot_ring_write_complete_mbdata - will be called from dhd_prot_h2d_mbdata_send_ctrlmsg,
 * which will hold DHD_BUS_LOCK to update WR pointer, Ring DB and also update bus_low_power_state
 * to indicate D3_INFORM sent in the same BUS_LOCK.
 */
static void BCMFASTPATH
dhd_prot_ring_write_complete_mbdata(dhd_pub_t *dhd, msgbuf_ring_t * ring, void *p,
	uint16 nitems, uint32 mb_data)
{
	unsigned long flags_bus;

	DHD_BUS_LOCK(dhd->bus->bus_lock, flags_bus);

	__dhd_prot_ring_write_complete(dhd, ring, p, nitems);

	/* Mark D3_INFORM in the same context to skip ringing H2D DB after D3_INFORM */
	if (mb_data == H2D_HOST_D3_INFORM) {
		dhd->bus->bus_low_power_state = DHD_BUS_D3_INFORM_SENT;
	}

	DHD_BUS_UNLOCK(dhd->bus->bus_lock, flags_bus);
}
/**
 * dhd_prot_upd_read_idx - Host updates the new RD index on consuming messages
 * from a D2H ring. The new RD index will be updated in the DMA Index array or
 * directly in dongle's ring state memory.
 */
static void
dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring)
{
	dhd_prot_t *prot = dhd->prot;
	uint32 db_index;
	uint corerev;

	/* update read index */
	/* If DMA'ing of h2d indices is supported, update the RD indices in
	 * host memory; otherwise write them directly into dongle TCM.
	 */
	if (IDMA_ACTIVE(dhd)) {
		dhd_prot_dma_indx_set(dhd, ring->rd,
			D2H_DMA_INDX_RD_UPD, ring->idx);
		db_index = IDMA_IDX1;
		if (dhd->bus->sih) {
			corerev = dhd->bus->sih->buscorerev;
			/* We need to explicitly configure the type of DMA for core rev >= 24 */
			if (corerev >= 24) {
				db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
			}
		}
		prot->mb_2_ring_fn(dhd->bus, db_index, FALSE);
	} else if (dhd->dma_h2d_ring_upd_support) {
		dhd_prot_dma_indx_set(dhd, ring->rd,
			D2H_DMA_INDX_RD_UPD, ring->idx);
	} else {
		dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
			sizeof(uint16), RING_RD_UPD, ring->idx);
	}
}
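/*
 * Taken together, the write-complete and read-update helpers above pick one
 * of three index publication paths, in decreasing order of preference:
 * (1) implicit DMA (IDMA) of the host-resident index array, (2) host-updated
 * DMA index arrays when dma_h2d_ring_upd_support is set, or (3) a direct
 * write of the index into dongle TCM ring state via
 * dhd_bus_cmn_writeshared(). Only the IDMA path needs the extra doorbell
 * selector written through mb_2_ring_fn().
 */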
static int
dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
	uint16 ring_type, uint32 req_id)
{
	unsigned long flags;
	d2h_ring_create_req_t *d2h_ring;
	uint16 alloced = 0;
	int ret = BCME_OK;
	uint16 max_h2d_rings = dhd->bus->max_submission_rings;
	msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;

	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);

	DHD_TRACE(("%s trying to send D2H ring create Req\n", __FUNCTION__));

	if (ring_to_create == NULL) {
		DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__));
		ret = BCME_ERROR;
		goto err;
	}

	/* Request for ring buffer space */
	d2h_ring = (d2h_ring_create_req_t *) dhd_prot_alloc_ring_space(dhd,
		ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
		&alloced, FALSE);

	if (d2h_ring == NULL) {
		DHD_ERROR(("%s: FATAL: No space in control ring to send D2H ring create\n",
			__FUNCTION__));
		ret = BCME_NOMEM;
		goto err;
	}

	ring_to_create->create_req_id = (uint16)req_id;
	ring_to_create->create_pending = TRUE;

	/* Common msg buf hdr */
	d2h_ring->msg.msg_type = MSG_TYPE_D2H_RING_CREATE;
	d2h_ring->msg.if_id = 0;
	d2h_ring->msg.flags = ctrl_ring->current_phase;
	d2h_ring->msg.request_id = htol32(ring_to_create->create_req_id);
	d2h_ring->ring_id = htol16(DHD_D2H_RING_OFFSET(ring_to_create->idx, max_h2d_rings));
	d2h_ring->ring_type = ring_type;
	d2h_ring->max_items = htol16(D2HRING_DYNAMIC_INFO_MAX_ITEM);
	d2h_ring->len_item = htol16(D2HRING_INFO_BUFCMPLT_ITEMSIZE);
	d2h_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
	d2h_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;

	d2h_ring->flags = 0;
	d2h_ring->msg.epoch =
		ctrl_ring->seqnum % H2D_EPOCH_MODULO;
	ctrl_ring->seqnum++;

	/* Update the control ring's WRITE index */
	dhd_prot_ring_write_complete(dhd, ctrl_ring, d2h_ring,
		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);

	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);

	return ret;
err:
	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);

	return ret;
}
static int
dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create, uint8 ring_type, uint32 id)
{
	unsigned long flags;
	h2d_ring_create_req_t *h2d_ring;
	uint16 alloced = 0;
	uint8 i = 0;
	int ret = BCME_OK;
	msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;

	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);

	DHD_TRACE(("%s trying to send H2D ring create Req\n", __FUNCTION__));

	if (ring_to_create == NULL) {
		DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__));
		ret = BCME_ERROR;
		goto err;
	}

	/* Request for ring buffer space */
	h2d_ring = (h2d_ring_create_req_t *)dhd_prot_alloc_ring_space(dhd,
		ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
		&alloced, FALSE);

	if (h2d_ring == NULL) {
		DHD_ERROR(("%s: FATAL: No space in control ring to send H2D ring create\n",
			__FUNCTION__));
		ret = BCME_NOMEM;
		goto err;
	}

	ring_to_create->create_req_id = (uint16)id;
	ring_to_create->create_pending = TRUE;

	/* Common msg buf hdr */
	h2d_ring->msg.msg_type = MSG_TYPE_H2D_RING_CREATE;
	h2d_ring->msg.if_id = 0;
	h2d_ring->msg.request_id = htol32(ring_to_create->create_req_id);
	h2d_ring->msg.flags = ctrl_ring->current_phase;
	h2d_ring->ring_id = htol16(DHD_H2D_RING_OFFSET(ring_to_create->idx));
	h2d_ring->ring_type = ring_type;
	h2d_ring->max_items = htol16(H2DRING_DYNAMIC_INFO_MAX_ITEM);
	h2d_ring->n_completion_ids = ring_to_create->n_completion_ids;
	h2d_ring->len_item = htol16(H2DRING_INFO_BUFPOST_ITEMSIZE);
	h2d_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
	h2d_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;

	for (i = 0; i < ring_to_create->n_completion_ids; i++) {
		h2d_ring->completion_ring_ids[i] = htol16(ring_to_create->compeltion_ring_ids[i]);
	}

	h2d_ring->flags = 0;
	h2d_ring->msg.epoch =
		ctrl_ring->seqnum % H2D_EPOCH_MODULO;
	ctrl_ring->seqnum++;

	/* Update the control ring's WRITE index */
	dhd_prot_ring_write_complete(dhd, ctrl_ring, h2d_ring,
		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);

	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);

	return ret;
err:
	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);

	return ret;
}
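/*
 * Ring-create requests are asynchronous: the request id stashed in
 * create_req_id and the create_pending flag set above are matched against
 * the corresponding ring-create completion message (see
 * dhd_prot_process_h2d_ring_create_complete() and
 * dhd_prot_process_d2h_ring_create_complete() below) before the new ring
 * is marked inited and put to use.
 */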
/**
 * dhd_prot_dma_indx_set - set a new WR or RD index in the DMA index array.
 * Dongle will DMA the entire array (if DMA_INDX feature is enabled).
 * See dhd_prot_dma_indx_init()
 */
void
dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, uint16 ringid)
{
	uint8 *ptr;
	uint16 offset;
	dhd_prot_t *prot = dhd->prot;
	uint16 max_h2d_rings = dhd->bus->max_submission_rings;

	switch (type) {
		case H2D_DMA_INDX_WR_UPD:
			ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
			offset = DHD_H2D_RING_OFFSET(ringid);
			break;

		case D2H_DMA_INDX_RD_UPD:
			ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
			offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
			break;

		case H2D_IFRM_INDX_WR_UPD:
			ptr = (uint8 *)(prot->h2d_ifrm_indx_wr_buf.va);
			offset = DHD_H2D_FRM_FLOW_RING_OFFSET(ringid);
			break;

		default:
			DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
				__FUNCTION__));
			return;
	}

	ASSERT(prot->rw_index_sz != 0);
	ptr += offset * prot->rw_index_sz;

	*(uint16 *)ptr = htol16(new_index);

	OSL_CACHE_FLUSH((void *)ptr, prot->rw_index_sz);

	DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
		__FUNCTION__, new_index, type, ringid, ptr, offset));
} /* dhd_prot_dma_indx_set */
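/*
 * Layout sketch of a DMA index array (assuming rw_index_sz == 2, i.e. the
 * dongle advertised 2-byte read/write indices): the array is a dense vector
 * of little-endian index values, one entry per ring, so a ring's entry
 * lives (offset * rw_index_sz) bytes from the buffer base, where offset is
 * derived from the ring id by DHD_H2D_RING_OFFSET()/DHD_D2H_RING_OFFSET().
 * A 4-byte rw_index_sz only widens the stride; the code above still stores
 * the index as a uint16 at the base of the entry.
 */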
/**
 * dhd_prot_dma_indx_get - Fetch a WR or RD index from the dongle DMA-ed index
 * array.
 * Dongle DMAes an entire array to host memory (if the feature is enabled).
 * See dhd_prot_dma_indx_init()
 */
static uint16
dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid)
{
	uint8 *ptr;
	uint16 data;
	uint16 offset;
	dhd_prot_t *prot = dhd->prot;
	uint16 max_h2d_rings = dhd->bus->max_submission_rings;

	switch (type) {
		case H2D_DMA_INDX_WR_UPD:
			ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
			offset = DHD_H2D_RING_OFFSET(ringid);
			break;

		case H2D_DMA_INDX_RD_UPD:
			ptr = (uint8 *)(prot->h2d_dma_indx_rd_buf.va);
			offset = DHD_H2D_RING_OFFSET(ringid);
			break;

		case D2H_DMA_INDX_WR_UPD:
			ptr = (uint8 *)(prot->d2h_dma_indx_wr_buf.va);
			offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
			break;

		case D2H_DMA_INDX_RD_UPD:
			ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
			offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
			break;

		default:
			DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
				__FUNCTION__));
			return 0;
	}

	ASSERT(prot->rw_index_sz != 0);
	ptr += offset * prot->rw_index_sz;

	OSL_CACHE_INV((void *)ptr, prot->rw_index_sz);

	data = LTOH16(*((uint16 *)ptr));

	DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
		__FUNCTION__, data, type, ringid, ptr, offset));

	return data;
} /* dhd_prot_dma_indx_get */
/**
 * An array of DMA read/write indices, containing information about host rings, can be maintained
 * either in host memory or in device memory, dependent on preprocessor options. This function is,
 * dependent on these options, called during driver initialization. It reserves and initializes
 * blocks of DMA'able host memory containing an array of DMA read or DMA write indices. The physical
 * addresses of these host memory blocks are communicated to the dongle later on. By reading this host
 * memory, the dongle learns about the state of the host rings.
 */
static int
dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
	dhd_dma_buf_t *dma_buf, uint32 bufsz)
{
	int rc;

	if ((dma_buf->len == bufsz) || (dma_buf->va != NULL))
		return BCME_OK;

	rc = dhd_dma_buf_alloc(dhd, dma_buf, bufsz);

	return rc;
}
int
dhd_prot_dma_indx_init(dhd_pub_t *dhd, uint32 rw_index_sz, uint8 type, uint32 length)
{
	uint32 bufsz;
	dhd_prot_t *prot = dhd->prot;
	dhd_dma_buf_t *dma_buf;

	if (prot == NULL) {
		DHD_ERROR(("prot is not inited\n"));
		return BCME_ERROR;
	}

	/* Dongle advertises 2B or 4B RW index size */
	ASSERT(rw_index_sz != 0);
	prot->rw_index_sz = rw_index_sz;

	bufsz = rw_index_sz * length;

	switch (type) {
		case H2D_DMA_INDX_WR_BUF:
			dma_buf = &prot->h2d_dma_indx_wr_buf;
			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
				goto ret_no_mem;
			DHD_ERROR(("H2D DMA WR INDX : array size %d = %d * %d\n",
				dma_buf->len, rw_index_sz, length));
			break;

		case H2D_DMA_INDX_RD_BUF:
			dma_buf = &prot->h2d_dma_indx_rd_buf;
			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
				goto ret_no_mem;
			DHD_ERROR(("H2D DMA RD INDX : array size %d = %d * %d\n",
				dma_buf->len, rw_index_sz, length));
			break;

		case D2H_DMA_INDX_WR_BUF:
			dma_buf = &prot->d2h_dma_indx_wr_buf;
			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
				goto ret_no_mem;
			DHD_ERROR(("D2H DMA WR INDX : array size %d = %d * %d\n",
				dma_buf->len, rw_index_sz, length));
			break;

		case D2H_DMA_INDX_RD_BUF:
			dma_buf = &prot->d2h_dma_indx_rd_buf;
			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
				goto ret_no_mem;
			DHD_ERROR(("D2H DMA RD INDX : array size %d = %d * %d\n",
				dma_buf->len, rw_index_sz, length));
			break;

		case H2D_IFRM_INDX_WR_BUF:
			dma_buf = &prot->h2d_ifrm_indx_wr_buf;
			if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
				goto ret_no_mem;
			DHD_ERROR(("H2D IFRM WR INDX : array size %d = %d * %d\n",
				dma_buf->len, rw_index_sz, length));
			break;

		default:
			DHD_ERROR(("%s: Unexpected option\n", __FUNCTION__));
			return BCME_BADOPTION;
	}

	return BCME_OK;

ret_no_mem:
	DHD_ERROR(("%s: dhd_prot_dma_indx_alloc type %d buf_sz %d failure\n",
		__FUNCTION__, type, bufsz));
	return BCME_NOMEM;
} /* dhd_prot_dma_indx_init */
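/*
 * Typical attach-time usage (a sketch; the actual call sites and ring
 * counts live in the PCIe bus layer, not in this file, and the names
 * max_h2d/max_d2h below are illustrative placeholders):
 *
 *   dhd_prot_dma_indx_init(dhd, rw_index_sz, H2D_DMA_INDX_WR_BUF, max_h2d);
 *   dhd_prot_dma_indx_init(dhd, rw_index_sz, H2D_DMA_INDX_RD_BUF, max_h2d);
 *   dhd_prot_dma_indx_init(dhd, rw_index_sz, D2H_DMA_INDX_WR_BUF, max_d2h);
 *   dhd_prot_dma_indx_init(dhd, rw_index_sz, D2H_DMA_INDX_RD_BUF, max_d2h);
 *
 * The physical addresses of the allocated buffers are later communicated to
 * the dongle, which can then DMA whole index arrays in one shot rather than
 * the host reading each index out of dongle memory per ring.
 */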
/**
 * Called on checking for 'completion' messages from the dongle. Returns next host buffer to read
 * from, or NULL if there are no more messages to read.
 */
static uint8*
dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 *available_len)
{
	uint16 wr;
	uint16 rd;
	uint16 depth;
	uint16 items;
	void *read_addr = NULL; /* address of next msg to be read in ring */

	DHD_TRACE(("%s: d2h_dma_indx_rd_buf %p, d2h_dma_indx_wr_buf %p\n",
		__FUNCTION__, (uint32 *)(dhd->prot->d2h_dma_indx_rd_buf.va),
		(uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va)));

	/* Remember the read index in a variable.
	 * This is because ring->rd gets updated at the end of this function,
	 * so the exact read index from which the message was read could not
	 * be printed otherwise.
	 */
	ring->curr_rd = ring->rd;

	/* update write pointer */
	if (dhd->dma_d2h_ring_upd_support) {
		/* DMAing write/read indices supported */
		ring->wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
	} else {
		dhd_bus_cmn_readshared(dhd->bus, &(ring->wr), RING_WR_UPD, ring->idx);
	}

	wr = ring->wr;
	rd = ring->rd;
	depth = ring->max_items;

	/* check for avail space, in number of ring items */
	items = READ_AVAIL_SPACE(wr, rd, depth);
	if (items == 0)
		return NULL;

	/*
	 * Note that there are builds where Assert translates to just printk,
	 * so even if we had hit this condition we would never halt, and
	 * dhd_prot_process_msgtype can get into a big loop if this is not
	 * caught here.
	 */
	if (items > ring->max_items) {
		DHD_ERROR(("\r\n======================= \r\n"));
		DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
			__FUNCTION__, ring, ring->name, ring->max_items, items));
		DHD_ERROR(("wr: %d, rd: %d, depth: %d \r\n", wr, rd, depth));
		DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n",
			dhd->busstate, dhd->bus->wait_for_d3_ack));
		DHD_ERROR(("\r\n======================= \r\n"));
#ifdef SUPPORT_LINKDOWN_RECOVERY
		if (wr >= ring->max_items) {
			dhd->bus->read_shm_fail = TRUE;
		}
#else
#ifdef DHD_FW_COREDUMP
		if (dhd->memdump_enabled) {
			/* collect core dump */
			dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR;
			dhd_bus_mem_dump(dhd);
		}
#endif /* DHD_FW_COREDUMP */
#endif /* SUPPORT_LINKDOWN_RECOVERY */

		*available_len = 0;
		dhd_schedule_reset(dhd);

		return NULL;
	}

	/* if space is available, calculate address to be read */
	read_addr = (char*)ring->dma_buf.va + (rd * ring->item_len);

	/* update read pointer */
	if ((ring->rd + items) >= ring->max_items)
		ring->rd = 0;
	else
		ring->rd += items;

	ASSERT(ring->rd < ring->max_items);

	/* convert items to bytes : available_len must be 32bits */
	*available_len = (uint32)(items * ring->item_len);

	OSL_CACHE_INV(read_addr, *available_len);

	/* return read address */
	return read_addr;
} /* dhd_prot_get_read_addr */
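/*
 * Note on wrap-around (assuming the READ_AVAIL_SPACE() definition used by
 * this file): when wr has wrapped past rd, the macro reports only the items
 * between rd and the end of the ring, so one pass consumes up to the wrap
 * point and the portion at the start of the ring is picked up by the
 * caller's next invocation. This keeps each returned read_addr/
 * available_len pair describing a single contiguous region.
 */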
/**
 * dhd_prot_h2d_mbdata_send_ctrlmsg is a non-atomic function,
 * make sure the callers always hold appropriate locks.
 */
int dhd_prot_h2d_mbdata_send_ctrlmsg(dhd_pub_t *dhd, uint32 mb_data)
{
	h2d_mailbox_data_t *h2d_mb_data;
	uint16 alloced = 0;
	msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
	unsigned long flags;
	int num_post = 1;
	int i;

	DHD_INFO(("%s Sending H2D MB data Req data 0x%04x\n",
		__FUNCTION__, mb_data));
	if (!ctrl_ring->inited) {
		DHD_ERROR(("%s: Ctrl Submit Ring: not inited\n", __FUNCTION__));
		return BCME_ERROR;
	}

	for (i = 0; i < num_post; i++) {
		DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
		/* Request for ring buffer space */
		h2d_mb_data = (h2d_mailbox_data_t *)dhd_prot_alloc_ring_space(dhd,
			ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
			&alloced, FALSE);

		if (h2d_mb_data == NULL) {
			DHD_ERROR(("%s: FATAL: No space in control ring to send H2D Mb data\n",
				__FUNCTION__));
			DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
			return BCME_NOMEM;
		}

		memset(h2d_mb_data, 0, sizeof(h2d_mailbox_data_t));
		/* Common msg buf hdr */
		h2d_mb_data->msg.msg_type = MSG_TYPE_H2D_MAILBOX_DATA;
		h2d_mb_data->msg.flags = ctrl_ring->current_phase;

		h2d_mb_data->msg.epoch =
			ctrl_ring->seqnum % H2D_EPOCH_MODULO;
		ctrl_ring->seqnum++;

		/* Update the mailbox data */
		h2d_mb_data->mail_box_data = htol32(mb_data);

		DHD_INFO(("%s Send H2D MB data Req data 0x%04x\n", __FUNCTION__, mb_data));

		/* upd wrt ptr and raise interrupt */
		dhd_prot_ring_write_complete_mbdata(dhd, ctrl_ring, h2d_mb_data,
			DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, mb_data);

		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
	}

	return 0;
}
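/*
 * H2D_HOST_D3_INFORM is deliberately routed through
 * dhd_prot_ring_write_complete_mbdata() above so that bus_low_power_state
 * is flipped to DHD_BUS_D3_INFORM_SENT under the same DHD_BUS_LOCK that
 * rings the doorbell; any H2D doorbell attempted after this point can then
 * be suppressed without racing the D3 transition.
 */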
/** Creates a flow ring and informs dongle of this event */
int
dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
{
	tx_flowring_create_request_t *flow_create_rqst;
	msgbuf_ring_t *flow_ring;
	dhd_prot_t *prot = dhd->prot;
	unsigned long flags;
	uint16 alloced = 0;
	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
	uint16 max_flowrings = dhd->bus->max_tx_flowrings;

	/* Fetch a pre-initialized msgbuf_ring from the flowring pool */
	flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
	if (flow_ring == NULL) {
		DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
			__FUNCTION__, flow_ring_node->flowid));
		return BCME_NOMEM;
	}

	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);

	/* Request for ctrl_ring buffer space */
	flow_create_rqst = (tx_flowring_create_request_t *)
		dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);

	if (flow_create_rqst == NULL) {
		dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
		DHD_ERROR(("%s: Flow Create Req flowid %d - failure ring space\n",
			__FUNCTION__, flow_ring_node->flowid));
		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
		return BCME_NOMEM;
	}

	flow_ring_node->prot_info = (void *)flow_ring;

	/* Common msg buf hdr */
	flow_create_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_CREATE;
	flow_create_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
	flow_create_rqst->msg.request_id = htol32(0); /* TBD */
	flow_create_rqst->msg.flags = ctrl_ring->current_phase;

	flow_create_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
	ctrl_ring->seqnum++;

	/* Update flow create message */
	flow_create_rqst->tid = flow_ring_node->flow_info.tid;
	flow_create_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
	memcpy(flow_create_rqst->sa, flow_ring_node->flow_info.sa, sizeof(flow_create_rqst->sa));
	memcpy(flow_create_rqst->da, flow_ring_node->flow_info.da, sizeof(flow_create_rqst->da));
	/* CAUTION: ring::base_addr already in Little Endian */
	flow_create_rqst->flow_ring_ptr.low_addr = flow_ring->base_addr.low_addr;
	flow_create_rqst->flow_ring_ptr.high_addr = flow_ring->base_addr.high_addr;
	flow_create_rqst->max_items = htol16(prot->h2d_max_txpost);
	flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE);

	/* definition for ifrm mask : bit0:d11ac core, bit1:d11ad core
	 * currently it is not used for priority, so it is used solely as the ifrm mask
	 */
	if (IFRM_ACTIVE(dhd))
		flow_create_rqst->priority_ifrmmask = (1 << IFRM_DEV_0);

	DHD_ERROR(("%s: Send Flow Create Req flow ID %d for peer " MACDBG
		" prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid,
		MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid,
		flow_ring_node->flow_info.ifindex));

	/* Update the flow_ring's WRITE index */
	if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
		dhd_prot_dma_indx_set(dhd, flow_ring->wr,
			H2D_DMA_INDX_WR_UPD, flow_ring->idx);
	} else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(flow_ring->idx, max_flowrings)) {
		dhd_prot_dma_indx_set(dhd, flow_ring->wr,
			H2D_IFRM_INDX_WR_UPD, flow_ring->idx);
	} else {
		dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
			sizeof(uint16), RING_WR_UPD, flow_ring->idx);
	}

	/* update control subn ring's WR index and ring doorbell to dongle */
	dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_create_rqst, 1);

	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);

	return BCME_OK;
} /* dhd_prot_flow_ring_create */
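/*
 * Flow-ring creation is likewise asynchronous: the dongle answers with
 * MSG_TYPE_FLOW_RING_CREATE_CMPLT, handled by
 * dhd_prot_flow_ring_create_response_process() below, which hands the
 * completion status to the bus layer so queued tx traffic can start (or
 * the flowring can be torn back down on failure).
 */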
/** called on receiving MSG_TYPE_FLOW_RING_CREATE_CMPLT message from dongle */
static void
dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg)
{
	tx_flowring_create_response_t *flow_create_resp = (tx_flowring_create_response_t *)msg;

	DHD_ERROR(("%s: Flow Create Response status = %d Flow %d\n", __FUNCTION__,
		ltoh16(flow_create_resp->cmplt.status),
		ltoh16(flow_create_resp->cmplt.flow_ring_id)));

	dhd_bus_flow_ring_create_response(dhd->bus,
		ltoh16(flow_create_resp->cmplt.flow_ring_id),
		ltoh16(flow_create_resp->cmplt.status));
}
static void
dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf)
{
	h2d_ring_create_response_t *resp = (h2d_ring_create_response_t *)buf;

	DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__,
		ltoh16(resp->cmplt.status),
		ltoh16(resp->cmplt.ring_id),
		ltoh32(resp->cmn_hdr.request_id)));
	if ((ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_DBGRING_REQ_PKTID) &&
		(ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_BTLOGRING_REQ_PKTID)) {
		DHD_ERROR(("invalid request ID with h2d ring create complete\n"));
		return;
	}
	if (dhd->prot->h2dring_info_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id) &&
		!dhd->prot->h2dring_info_subn->create_pending) {
		DHD_ERROR(("info ring create status for not pending submit ring\n"));
	}

	if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
		DHD_ERROR(("info/btlog ring create failed with status %d\n",
			ltoh16(resp->cmplt.status)));
		return;
	}
	if (dhd->prot->h2dring_info_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id)) {
		dhd->prot->h2dring_info_subn->create_pending = FALSE;
		dhd->prot->h2dring_info_subn->inited = TRUE;
		DHD_ERROR(("info buffer post after ring create\n"));
		dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn);
	}
}
static void
dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf)
{
	d2h_ring_create_response_t *resp = (d2h_ring_create_response_t *)buf;

	DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__,
		ltoh16(resp->cmplt.status),
		ltoh16(resp->cmplt.ring_id),
		ltoh32(resp->cmn_hdr.request_id)));
	if ((ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_DBGRING_REQ_PKTID) &&
		(ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_BTLOGRING_REQ_PKTID)) {
		DHD_ERROR(("invalid request ID with d2h ring create complete\n"));
		return;
	}
	if (ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_DBGRING_REQ_PKTID) {
		if (!dhd->prot->d2hring_info_cpln->create_pending) {
			DHD_ERROR(("info ring create status for not pending cpl ring\n"));
			return;
		}

		if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
			DHD_ERROR(("info cpl ring create failed with status %d\n",
				ltoh16(resp->cmplt.status)));
			return;
		}
		dhd->prot->d2hring_info_cpln->create_pending = FALSE;
		dhd->prot->d2hring_info_cpln->inited = TRUE;
	}
}
static void
dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf)
{
	d2h_mailbox_data_t *d2h_data;

	d2h_data = (d2h_mailbox_data_t *)buf;
	DHD_INFO(("%s dhd_prot_process_d2h_mb_data, 0x%04x\n", __FUNCTION__,
		d2h_data->d2h_mailbox_data));
	dhd_bus_handle_mb_data(dhd->bus, d2h_data->d2h_mailbox_data);
}
static void
dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf)
{
	DHD_ERROR(("Timesync feature not compiled in but GOT HOST_TS_COMPLETE\n"));
}
/** called on e.g. flow ring delete */
void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info)
{
	msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;

	dhd_prot_ring_detach(dhd, flow_ring);
	DHD_INFO(("%s Cleaning up Flow \n", __FUNCTION__));
}
void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info,
	struct bcmstrbuf *strbuf, const char * fmt)
{
	const char *default_fmt =
		"RD %d WR %d BASE(VA) %p BASE(PA) %x:%x SIZE %d "
		"WORK_ITEM_SIZE %d MAX_WORK_ITEMS %d TOTAL_SIZE %d\n";
	msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
	uint16 rd, wr;
	uint32 dma_buf_len = flow_ring->max_items * flow_ring->item_len;

	if (fmt == NULL) {
		fmt = default_fmt;
	}

	if (dhd->bus->is_linkdown) {
		DHD_ERROR(("%s: Skip dumping flowring due to Link down\n", __FUNCTION__));
		return;
	}

	dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
	dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
	bcm_bprintf(strbuf, fmt, rd, wr, flow_ring->dma_buf.va,
		ltoh32(flow_ring->base_addr.high_addr),
		ltoh32(flow_ring->base_addr.low_addr),
		flow_ring->item_len, flow_ring->max_items,
		dma_buf_len);
}
void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
{
	dhd_prot_t *prot = dhd->prot;

	bcm_bprintf(strbuf, "IPCrevs: Dev %d, \t Host %d, \tactive %d\n",
		dhd->prot->device_ipc_version,
		dhd->prot->host_ipc_version,
		dhd->prot->active_ipc_version);

	bcm_bprintf(strbuf, "max Host TS bufs to post: %d, \t posted %d \n",
		dhd->prot->max_tsbufpost, dhd->prot->cur_ts_bufs_posted);
	bcm_bprintf(strbuf, "max INFO bufs to post: %d, \t posted %d \n",
		dhd->prot->max_infobufpost, dhd->prot->infobufpost);
	bcm_bprintf(strbuf, "max event bufs to post: %d, \t posted %d \n",
		dhd->prot->max_eventbufpost, dhd->prot->cur_event_bufs_posted);
	bcm_bprintf(strbuf, "max ioctlresp bufs to post: %d, \t posted %d \n",
		dhd->prot->max_ioctlrespbufpost, dhd->prot->cur_ioctlresp_bufs_posted);
	bcm_bprintf(strbuf, "max RX bufs to post: %d, \t posted %d \n",
		dhd->prot->max_rxbufpost, dhd->prot->rxbufpost);

	bcm_bprintf(strbuf,
		"%14s %5s %5s %17s %17s %14s %14s %10s\n",
		"Type", "RD", "WR", "BASE(VA)", "BASE(PA)",
		"WORK_ITEM_SIZE", "MAX_WORK_ITEMS", "TOTAL_SIZE");
	bcm_bprintf(strbuf, "%14s", "H2DCtrlPost");
	dhd_prot_print_flow_ring(dhd, &prot->h2dring_ctrl_subn, strbuf,
		" %5d %5d %17p %8x:%8x %14d %14d %10d\n");
	bcm_bprintf(strbuf, "%14s", "D2HCtrlCpl");
	dhd_prot_print_flow_ring(dhd, &prot->d2hring_ctrl_cpln, strbuf,
		" %5d %5d %17p %8x:%8x %14d %14d %10d\n");
	bcm_bprintf(strbuf, "%14s", "H2DRxPost");
	dhd_prot_print_flow_ring(dhd, &prot->h2dring_rxp_subn, strbuf,
		" %5d %5d %17p %8x:%8x %14d %14d %10d\n");
	bcm_bprintf(strbuf, "%14s", "D2HRxCpl");
	dhd_prot_print_flow_ring(dhd, &prot->d2hring_rx_cpln, strbuf,
		" %5d %5d %17p %8x:%8x %14d %14d %10d\n");
	bcm_bprintf(strbuf, "%14s", "D2HTxCpl");
	dhd_prot_print_flow_ring(dhd, &prot->d2hring_tx_cpln, strbuf,
		" %5d %5d %17p %8x:%8x %14d %14d %10d\n");
	if (dhd->prot->h2dring_info_subn != NULL && dhd->prot->d2hring_info_cpln != NULL) {
		bcm_bprintf(strbuf, "%14s", "H2DRingInfoSub");
		dhd_prot_print_flow_ring(dhd, prot->h2dring_info_subn, strbuf,
			" %5d %5d %17p %8x:%8x %14d %14d %10d\n");
		bcm_bprintf(strbuf, "%14s", "D2HRingInfoCpl");
		dhd_prot_print_flow_ring(dhd, prot->d2hring_info_cpln, strbuf,
			" %5d %5d %17p %8x:%8x %14d %14d %10d\n");
	}

	bcm_bprintf(strbuf, "active_tx_count %d pktidmap_avail(ctrl/rx/tx) %d %d %d\n",
		OSL_ATOMIC_READ(dhd->osh, &dhd->prot->active_tx_count),
		DHD_PKTID_AVAIL(dhd->prot->pktid_ctrl_map),
		DHD_PKTID_AVAIL(dhd->prot->pktid_rx_map),
		DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map));
}
int
dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
{
	tx_flowring_delete_request_t *flow_delete_rqst;
	dhd_prot_t *prot = dhd->prot;
	unsigned long flags;
	uint16 alloced = 0;
	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;

	DHD_RING_LOCK(ring->ring_lock, flags);

	/* Request for ring buffer space */
	flow_delete_rqst = (tx_flowring_delete_request_t *)
		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);

	if (flow_delete_rqst == NULL) {
		DHD_RING_UNLOCK(ring->ring_lock, flags);
		DHD_ERROR(("%s: Flow Delete Req - failure ring space\n", __FUNCTION__));
		return BCME_NOMEM;
	}

	/* Common msg buf hdr */
	flow_delete_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_DELETE;
	flow_delete_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
	flow_delete_rqst->msg.request_id = htol32(0); /* TBD */
	flow_delete_rqst->msg.flags = ring->current_phase;

	flow_delete_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
	ring->seqnum++;

	/* Update Delete info */
	flow_delete_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
	flow_delete_rqst->reason = htol16(BCME_OK);

	DHD_ERROR(("%s: Send Flow Delete Req RING ID %d for peer " MACDBG
		" prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid,
		MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid,
		flow_ring_node->flow_info.ifindex));

	/* update ring's WR index and ring doorbell to dongle */
	dhd_prot_ring_write_complete(dhd, ring, flow_delete_rqst, 1);

	DHD_RING_UNLOCK(ring->ring_lock, flags);

	return BCME_OK;
}
static void BCMFASTPATH
dhd_prot_flow_ring_fastdelete(dhd_pub_t *dhd, uint16 flowid, uint16 rd_idx)
{
	flow_ring_node_t *flow_ring_node = DHD_FLOW_RING(dhd, flowid);
	msgbuf_ring_t *ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
	host_txbuf_cmpl_t txstatus;
	host_txbuf_post_t *txdesc;
	uint16 wr_idx;

	DHD_INFO(("%s: FAST delete ring, flowid=%d, rd_idx=%d, wr_idx=%d\n",
		__FUNCTION__, flowid, rd_idx, ring->wr));

	memset(&txstatus, 0, sizeof(txstatus));
	txstatus.compl_hdr.flow_ring_id = flowid;
	txstatus.cmn_hdr.if_id = flow_ring_node->flow_info.ifindex;
	wr_idx = ring->wr;

	while (wr_idx != rd_idx) {
		if (wr_idx)
			wr_idx--;
		else
			wr_idx = ring->max_items - 1;
		txdesc = (host_txbuf_post_t *)((char *)DHD_RING_BGN_VA(ring) +
			(wr_idx * ring->item_len));
		txstatus.cmn_hdr.request_id = txdesc->cmn_hdr.request_id;
		dhd_prot_txstatus_process(dhd, &txstatus);
	}
}
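/*
 * Fast delete walks the tx flowring backwards from the host write index to
 * the dongle-reported read index, synthesizing a tx completion for every
 * work item the dongle never consumed; this reclaims the associated pktids
 * and packets without waiting for per-packet status from firmware. The
 * decrement-with-wrap mirrors the order in which items were posted.
 */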
static void
dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg)
{
	tx_flowring_delete_response_t *flow_delete_resp = (tx_flowring_delete_response_t *)msg;

	DHD_ERROR(("%s: Flow Delete Response status = %d Flow %d\n", __FUNCTION__,
		flow_delete_resp->cmplt.status, flow_delete_resp->cmplt.flow_ring_id));

	if (dhd->fast_delete_ring_support) {
		dhd_prot_flow_ring_fastdelete(dhd, flow_delete_resp->cmplt.flow_ring_id,
			flow_delete_resp->read_idx);
	}
	dhd_bus_flow_ring_delete_response(dhd->bus, flow_delete_resp->cmplt.flow_ring_id,
		flow_delete_resp->cmplt.status);
}
static void
dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg)
{
#ifdef IDLE_TX_FLOW_MGMT
	tx_idle_flowring_resume_response_t *flow_resume_resp =
		(tx_idle_flowring_resume_response_t *)msg;

	DHD_ERROR(("%s Flow resume Response status = %d Flow %d\n", __FUNCTION__,
		flow_resume_resp->cmplt.status, flow_resume_resp->cmplt.flow_ring_id));

	dhd_bus_flow_ring_resume_response(dhd->bus, flow_resume_resp->cmplt.flow_ring_id,
		flow_resume_resp->cmplt.status);
#endif /* IDLE_TX_FLOW_MGMT */
}
static void
dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg)
{
#ifdef IDLE_TX_FLOW_MGMT
	int status;
	tx_idle_flowring_suspend_response_t *flow_suspend_resp =
		(tx_idle_flowring_suspend_response_t *)msg;

	status = flow_suspend_resp->cmplt.status;

	DHD_ERROR(("%s Flow id %d suspend Response status = %d\n",
		__FUNCTION__, flow_suspend_resp->cmplt.flow_ring_id,
		status));

	if (status != BCME_OK) {
		DHD_ERROR(("%s Error in Suspending Flow rings!!"
			" Dongle will still be polling idle rings!! Status = %d \n",
			__FUNCTION__, status));
	}
#endif /* IDLE_TX_FLOW_MGMT */
}
int
dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
{
	tx_flowring_flush_request_t *flow_flush_rqst;
	dhd_prot_t *prot = dhd->prot;
	unsigned long flags;
	uint16 alloced = 0;
	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;

	DHD_RING_LOCK(ring->ring_lock, flags);

	/* Request for ring buffer space */
	flow_flush_rqst = (tx_flowring_flush_request_t *)
		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
	if (flow_flush_rqst == NULL) {
		DHD_RING_UNLOCK(ring->ring_lock, flags);
		DHD_ERROR(("%s: Flow Flush Req - failure ring space\n", __FUNCTION__));
		return BCME_NOMEM;
	}

	/* Common msg buf hdr */
	flow_flush_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_FLUSH;
	flow_flush_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
	flow_flush_rqst->msg.request_id = htol32(0); /* TBD */
	flow_flush_rqst->msg.flags = ring->current_phase;
	flow_flush_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
	ring->seqnum++;

	flow_flush_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
	flow_flush_rqst->reason = htol16(BCME_OK);

	DHD_INFO(("%s: Send Flow Flush Req\n", __FUNCTION__));

	/* update ring's WR index and ring doorbell to dongle */
	dhd_prot_ring_write_complete(dhd, ring, flow_flush_rqst, 1);

	DHD_RING_UNLOCK(ring->ring_lock, flags);

	return BCME_OK;
} /* dhd_prot_flow_ring_flush */
static void
dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg)
{
	tx_flowring_flush_response_t *flow_flush_resp = (tx_flowring_flush_response_t *)msg;

	DHD_INFO(("%s: Flow Flush Response status = %d\n", __FUNCTION__,
		flow_flush_resp->cmplt.status));

	dhd_bus_flow_ring_flush_response(dhd->bus, flow_flush_resp->cmplt.flow_ring_id,
		flow_flush_resp->cmplt.status);
}
/**
 * Request dongle to configure soft doorbells for D2H rings. Host populated soft
 * doorbell information is transferred to dongle via the d2h ring config control
 * message.
 */
void
dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd)
{
#if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
	uint16 ring_idx;
	uint8 *msg_next;
	void *msg_start;
	uint16 alloced = 0;
	unsigned long flags;
	dhd_prot_t *prot = dhd->prot;
	ring_config_req_t *ring_config_req;
	bcmpcie_soft_doorbell_t *soft_doorbell;
	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
	const uint16 d2h_rings = BCMPCIE_D2H_COMMON_MSGRINGS;

	/* Claim space for d2h_ring number of d2h_ring_config_req_t messages */
	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
	msg_start = dhd_prot_alloc_ring_space(dhd, ctrl_ring, d2h_rings, &alloced, TRUE);

	if (msg_start == NULL) {
		DHD_ERROR(("%s Msgbuf no space for %d D2H ring config soft doorbells\n",
			__FUNCTION__, d2h_rings));
		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
		return;
	}

	msg_next = (uint8 *)msg_start;

	for (ring_idx = 0; ring_idx < d2h_rings; ring_idx++) {

		/* position the ring_config_req into the ctrl subm ring */
		ring_config_req = (ring_config_req_t *)msg_next;

		/* Common msg header */
		ring_config_req->msg.msg_type = MSG_TYPE_D2H_RING_CONFIG;
		ring_config_req->msg.if_id = 0;
		ring_config_req->msg.flags = 0;

		ring_config_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
		ctrl_ring->seqnum++;

		ring_config_req->msg.request_id = htol32(DHD_FAKE_PKTID); /* unused */

		/* Ring Config subtype and d2h ring_id */
		ring_config_req->subtype = htol16(D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL);
		ring_config_req->ring_id = htol16(DHD_D2H_RINGID(ring_idx));

		/* Host soft doorbell configuration */
		soft_doorbell = &prot->soft_doorbell[ring_idx];

		ring_config_req->soft_doorbell.value = htol32(soft_doorbell->value);
		ring_config_req->soft_doorbell.haddr.high =
			htol32(soft_doorbell->haddr.high);
		ring_config_req->soft_doorbell.haddr.low =
			htol32(soft_doorbell->haddr.low);
		ring_config_req->soft_doorbell.items = htol16(soft_doorbell->items);
		ring_config_req->soft_doorbell.msecs = htol16(soft_doorbell->msecs);

		DHD_INFO(("%s: Soft doorbell haddr 0x%08x 0x%08x value 0x%08x\n",
			__FUNCTION__, ring_config_req->soft_doorbell.haddr.high,
			ring_config_req->soft_doorbell.haddr.low,
			ring_config_req->soft_doorbell.value));

		msg_next = msg_next + ctrl_ring->item_len;
	}

	/* update control subn ring's WR index and ring doorbell to dongle */
	dhd_prot_ring_write_complete(dhd, ctrl_ring, msg_start, d2h_rings);

	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
#endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
}
static void
dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg)
{
	DHD_INFO(("%s: Ring Config Response - status %d ringid %d\n",
		__FUNCTION__, ltoh16(((ring_config_resp_t *)msg)->compl_hdr.status),
		ltoh16(((ring_config_resp_t *)msg)->compl_hdr.flow_ring_id)));
}
#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
static void
copy_ext_trap_sig(dhd_pub_t *dhd, trap_t *tr)
{
	uint32 *ext_data = dhd->extended_trap_data;
	hnd_ext_trap_hdr_t *hdr;
	const bcm_tlv_t *tlv;

	if (ext_data == NULL) {
		return;
	}
	/* First word is original trap_data */
	ext_data++;

	/* Followed by the extended trap data header */
	hdr = (hnd_ext_trap_hdr_t *)ext_data;

	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
	if (tlv) {
		memcpy(tr, &tlv->data, sizeof(struct _trap_struct));
	}
}
#define TRAP_T_NAME_OFFSET(var) {#var, OFFSETOF(trap_t, var)}

typedef struct {
	char name[HANG_INFO_TRAP_T_NAME_MAX];
	uint32 offset;
} hang_info_trap_t;

static hang_info_trap_t hang_info_trap_tbl[] = {
	{"reason", 0},
	{"ver", VENDOR_SEND_HANG_EXT_INFO_VER},
	{"stype", 0},
	TRAP_T_NAME_OFFSET(type),
	TRAP_T_NAME_OFFSET(epc),
	TRAP_T_NAME_OFFSET(cpsr),
	TRAP_T_NAME_OFFSET(spsr),
	TRAP_T_NAME_OFFSET(r0),
	TRAP_T_NAME_OFFSET(r1),
	TRAP_T_NAME_OFFSET(r2),
	TRAP_T_NAME_OFFSET(r3),
	TRAP_T_NAME_OFFSET(r4),
	TRAP_T_NAME_OFFSET(r5),
	TRAP_T_NAME_OFFSET(r6),
	TRAP_T_NAME_OFFSET(r7),
	TRAP_T_NAME_OFFSET(r8),
	TRAP_T_NAME_OFFSET(r9),
	TRAP_T_NAME_OFFSET(r10),
	TRAP_T_NAME_OFFSET(r11),
	TRAP_T_NAME_OFFSET(r12),
	TRAP_T_NAME_OFFSET(r13),
	TRAP_T_NAME_OFFSET(r14),
	TRAP_T_NAME_OFFSET(pc),
	{"", 0}
};
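/*
 * The HANG_INFO_TRAP_T_*_IDX macros (defined in the DHD headers) index into
 * this table: the leading "reason"/"ver"/"stype" rows carry synthesized
 * values patched in at dump time (see copy_hang_info_trap() below), while
 * the TRAP_T_NAME_OFFSET() rows map each ARM register name to its byte
 * offset inside trap_t so the raw register file can be emitted generically.
 */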
#define TAG_TRAP_IS_STATE(tag) \
	((tag == TAG_TRAP_MEMORY) || (tag == TAG_TRAP_PCIE_Q) || (tag == TAG_TRAP_WLC_STATE))

static void
copy_hang_info_head(char *dest, trap_t *src, int len, int field_name,
		int *bytes_written, int *cnt, char *cookie)
{
	uint8 *ptr;
	int remain_len;
	int i;

	ptr = (uint8 *)src;

	memset(dest, 0, len);
	remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;

	/* hang reason, hang info ver */
	for (i = 0; (i < HANG_INFO_TRAP_T_SUBTYPE_IDX) && (*cnt < HANG_FIELD_CNT_MAX);
			i++, (*cnt)++) {
		if (field_name) {
			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
			*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
					hang_info_trap_tbl[i].name, HANG_KEY_DEL);
		}
		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%d%c",
				hang_info_trap_tbl[i].offset, HANG_KEY_DEL);
	}

	if (*cnt < HANG_FIELD_CNT_MAX) {
		if (field_name) {
			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
			*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
					"cookie", HANG_KEY_DEL);
		}
		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s%c",
				cookie, HANG_KEY_DEL);
		(*cnt)++;
	}

	if (*cnt < HANG_FIELD_CNT_MAX) {
		if (field_name) {
			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
			*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
					hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].name,
					HANG_KEY_DEL);
		}
		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%08x%c",
				hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].offset,
				HANG_KEY_DEL);
		(*cnt)++;
	}

	if (*cnt < HANG_FIELD_CNT_MAX) {
		if (field_name) {
			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
			*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%s:%c",
					hang_info_trap_tbl[HANG_INFO_TRAP_T_EPC_IDX].name,
					HANG_KEY_DEL);
		}
		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%08x%c",
				*(uint32 *)
				(ptr + hang_info_trap_tbl[HANG_INFO_TRAP_T_EPC_IDX].offset),
				HANG_KEY_DEL);
		(*cnt)++;
	}
}
static void
copy_hang_info_trap_t(char *dest, trap_t *src, int len, int field_name,
		int *bytes_written, int *cnt, char *cookie)
{
	uint8 *ptr;
	int remain_len;
	int i;

	ptr = (uint8 *)src;

	remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;

	for (i = HANG_INFO_TRAP_T_OFFSET_IDX;
			(hang_info_trap_tbl[i].name[0] != 0) && (*cnt < HANG_FIELD_CNT_MAX);
			i++, (*cnt)++) {
		if (field_name) {
			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
			*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%s:",
					HANG_RAW_DEL, hang_info_trap_tbl[i].name);
		}
		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
				HANG_RAW_DEL, *(uint32 *)(ptr + hang_info_trap_tbl[i].offset));
	}
}
static void
copy_hang_info_stack(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
{
	int remain_len;
	int i = 0;
	const uint32 *stack;
	uint32 *ext_data = dhd->extended_trap_data;
	hnd_ext_trap_hdr_t *hdr;
	const bcm_tlv_t *tlv;
	int remain_stack_cnt = 0;
	uint32 dummy_data = 0;
	int bigdata_key_stack_cnt = 0;

	if (ext_data == NULL) {
		return;
	}
	/* First word is original trap_data */
	ext_data++;

	/* Followed by the extended trap data header */
	hdr = (hnd_ext_trap_hdr_t *)ext_data;

	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);

	remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;

	if (tlv) {
		stack = (const uint32 *)tlv->data;

		*bytes_written += scnprintf(&dest[*bytes_written], remain_len,
				"%08x", *(uint32 *)(stack++));
		(*cnt)++;
		if (*cnt >= HANG_FIELD_CNT_MAX) {
			return;
		}
		for (i = 1; i < (uint32)(tlv->len / sizeof(uint32)); i++, bigdata_key_stack_cnt++) {
			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
			/* Raw data for bigdata use '_' and Key data for bigdata use space */
			*bytes_written += scnprintf(&dest[*bytes_written], remain_len,
				"%c%08x",
				i <= HANG_INFO_BIGDATA_KEY_STACK_CNT ? HANG_KEY_DEL : HANG_RAW_DEL,
				*(uint32 *)(stack++));

			(*cnt)++;
			if ((*cnt >= HANG_FIELD_CNT_MAX) ||
					(i >= HANG_FIELD_TRAP_T_STACK_CNT_MAX)) {
				return;
			}
		}
	}

	remain_stack_cnt = HANG_FIELD_TRAP_T_STACK_CNT_MAX - i;

	for (i = 0; i < remain_stack_cnt; i++) {
		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
				HANG_RAW_DEL, dummy_data);
		(*cnt)++;
		if (*cnt >= HANG_FIELD_CNT_MAX) {
			return;
		}
	}
}
static void
get_hang_info_trap_subtype(dhd_pub_t *dhd, uint32 *subtype)
{
	uint32 i;
	uint32 *ext_data = dhd->extended_trap_data;
	hnd_ext_trap_hdr_t *hdr;
	const bcm_tlv_t *tlv;

	/* First word is original trap_data */
	ext_data++;

	/* Followed by the extended trap data header */
	hdr = (hnd_ext_trap_hdr_t *)ext_data;

	/* Dump a list of all tags found before parsing data */
	for (i = TAG_TRAP_DEEPSLEEP; i < TAG_TRAP_LAST; i++) {
		tlv = bcm_parse_tlvs(hdr->data, hdr->len, i);
		if (tlv) {
			if (!TAG_TRAP_IS_STATE(i)) {
				*subtype = i;
				return;
			}
		}
	}
}
static void
copy_hang_info_specific(dhd_pub_t *dhd, char *dest, int *bytes_written, int *cnt)
{
	int remain_len;
	uint32 i;
	const uint32 *data;
	uint32 *ext_data = dhd->extended_trap_data;
	hnd_ext_trap_hdr_t *hdr;
	const bcm_tlv_t *tlv;
	int remain_trap_data = 0;
	uint8 buf_u8[sizeof(uint32)] = { 0, };
	const uint8 *p_u8;

	if (ext_data == NULL) {
		return;
	}
	/* First word is original trap_data */
	ext_data++;

	/* Followed by the extended trap data header */
	hdr = (hnd_ext_trap_hdr_t *)ext_data;

	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
	if (tlv) {
		/* header length includes the tlv header */
		remain_trap_data = (hdr->len - tlv->len - sizeof(uint16));
	}

	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
	if (tlv) {
		/* header length includes the tlv header */
		remain_trap_data -= (tlv->len + sizeof(uint16));
	}

	data = (const uint32 *)(hdr->data + (hdr->len - remain_trap_data));

	remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;

	for (i = 0; i < (uint32)(remain_trap_data / sizeof(uint32)) && *cnt < HANG_FIELD_CNT_MAX;
			i++, (*cnt)++) {
		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
				HANG_RAW_DEL, *(uint32 *)(data++));
	}

	if (*cnt >= HANG_FIELD_CNT_MAX) {
		return;
	}

	remain_trap_data -= (sizeof(uint32) * i);

	if (remain_trap_data > (int)sizeof(buf_u8)) {
		DHD_ERROR(("%s: resize remain_trap_data\n", __FUNCTION__));
		remain_trap_data = sizeof(buf_u8);
	}

	if (remain_trap_data) {
		p_u8 = (const uint8 *)data;
		for (i = 0; i < (uint32)remain_trap_data; i++) {
			buf_u8[i] = *(const uint8 *)(p_u8++);
		}

		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - *bytes_written;
		*bytes_written += scnprintf(&dest[*bytes_written], remain_len, "%c%08x",
				HANG_RAW_DEL, ltoh32_ua(buf_u8));
		(*cnt)++;
	}
}
void
copy_hang_info_trap(dhd_pub_t *dhd)
{
	trap_t tr;
	int bytes_written = 0;
	int trap_subtype = 0;

	if (!dhd || !dhd->hang_info) {
		DHD_ERROR(("%s dhd=%p hang_info=%p\n", __FUNCTION__,
			dhd, (dhd ? dhd->hang_info : NULL)));
		return;
	}

	if (!dhd->dongle_trap_occured) {
		DHD_ERROR(("%s: dongle_trap_occured is FALSE\n", __FUNCTION__));
		return;
	}

	memset(&tr, 0x00, sizeof(struct _trap_struct));

	copy_ext_trap_sig(dhd, &tr);
	get_hang_info_trap_subtype(dhd, &trap_subtype);

	hang_info_trap_tbl[HANG_INFO_TRAP_T_REASON_IDX].offset = HANG_REASON_DONGLE_TRAP;
	hang_info_trap_tbl[HANG_INFO_TRAP_T_SUBTYPE_IDX].offset = trap_subtype;

	dhd->hang_info_cnt = 0;
	get_debug_dump_time(dhd->debug_dump_time_hang_str);
	copy_debug_dump_time(dhd->debug_dump_time_str, dhd->debug_dump_time_hang_str);

	copy_hang_info_head(dhd->hang_info, &tr, VENDOR_SEND_HANG_EXT_INFO_LEN, FALSE,
			&bytes_written, &dhd->hang_info_cnt, dhd->debug_dump_time_hang_str);

	DHD_INFO(("hang info head cnt: %d len: %d data: %s\n",
		dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));

	clear_debug_dump_time(dhd->debug_dump_time_hang_str);

	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
		copy_hang_info_stack(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
		DHD_INFO(("hang info stack cnt: %d len: %d data: %s\n",
			dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
	}

	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
		copy_hang_info_trap_t(dhd->hang_info, &tr, VENDOR_SEND_HANG_EXT_INFO_LEN, FALSE,
				&bytes_written, &dhd->hang_info_cnt, dhd->debug_dump_time_hang_str);
		DHD_INFO(("hang info trap_t cnt: %d len: %d data: %s\n",
			dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
	}

	if (dhd->hang_info_cnt < HANG_FIELD_CNT_MAX) {
		copy_hang_info_specific(dhd, dhd->hang_info, &bytes_written, &dhd->hang_info_cnt);
		DHD_INFO(("hang info specific cnt: %d len: %d data: %s\n",
			dhd->hang_info_cnt, (int)strlen(dhd->hang_info), dhd->hang_info));
	}
}
#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
int
dhd_prot_debug_info_print(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;
	msgbuf_ring_t *ring;
	uint16 rd, wr;
	uint32 dma_buf_len;
	uint64 current_time;

	DHD_ERROR(("\n ------- DUMPING VERSION INFORMATION ------- \r\n"));
	DHD_ERROR(("DHD: %s\n", dhd_version));
	DHD_ERROR(("Firmware: %s\n", fw_version));

	DHD_ERROR(("\n ------- DUMPING PROTOCOL INFORMATION ------- \r\n"));
	DHD_ERROR(("IPCrevs: Dev %d, Host %d, active %d\n",
		prot->device_ipc_version,
		prot->host_ipc_version,
		prot->active_ipc_version));
	DHD_ERROR(("d2h_intr_method -> %s\n",
		dhd->bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX"));
	DHD_ERROR(("max Host TS bufs to post: %d, posted %d\n",
		prot->max_tsbufpost, prot->cur_ts_bufs_posted));
	DHD_ERROR(("max INFO bufs to post: %d, posted %d\n",
		prot->max_infobufpost, prot->infobufpost));
	DHD_ERROR(("max event bufs to post: %d, posted %d\n",
		prot->max_eventbufpost, prot->cur_event_bufs_posted));
	DHD_ERROR(("max ioctlresp bufs to post: %d, posted %d\n",
		prot->max_ioctlrespbufpost, prot->cur_ioctlresp_bufs_posted));
	DHD_ERROR(("max RX bufs to post: %d, posted %d\n",
		prot->max_rxbufpost, prot->rxbufpost));
	DHD_ERROR(("h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
		h2d_max_txpost, prot->h2d_max_txpost));

	current_time = OSL_LOCALTIME_NS();
	DHD_ERROR(("current_time="SEC_USEC_FMT"\n", GET_SEC_USEC(current_time)));
	DHD_ERROR(("ioctl_fillup_time="SEC_USEC_FMT
		" ioctl_ack_time="SEC_USEC_FMT
		" ioctl_cmplt_time="SEC_USEC_FMT"\n",
		GET_SEC_USEC(prot->ioctl_fillup_time),
		GET_SEC_USEC(prot->ioctl_ack_time),
		GET_SEC_USEC(prot->ioctl_cmplt_time)));

	DHD_ERROR(("\n ------- DUMPING IOCTL RING RD WR Pointers ------- \r\n"));

	ring = &prot->h2dring_ctrl_subn;
	dma_buf_len = ring->max_items * ring->item_len;
	DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
		ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
		ltoh32(ring->base_addr.low_addr), dma_buf_len));
	DHD_ERROR(("CtrlPost: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
	if (dhd->bus->is_linkdown) {
		DHD_ERROR(("CtrlPost: From Shared Mem: RD and WR are invalid"
			" due to PCIe link down\r\n"));
	} else {
		dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
		dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
		DHD_ERROR(("CtrlPost: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
	}
	DHD_ERROR(("CtrlPost: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));

	ring = &prot->d2hring_ctrl_cpln;
	dma_buf_len = ring->max_items * ring->item_len;
	DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
		ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
		ltoh32(ring->base_addr.low_addr), dma_buf_len));
	DHD_ERROR(("CtrlCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
	if (dhd->bus->is_linkdown) {
		DHD_ERROR(("CtrlCpl: From Shared Mem: RD and WR are invalid"
			" due to PCIe link down\r\n"));
	} else {
		dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
		dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
		DHD_ERROR(("CtrlCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
	}
	DHD_ERROR(("CtrlCpl: Expected seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));

	ring = prot->h2dring_info_subn;
	if (ring) {
		dma_buf_len = ring->max_items * ring->item_len;
		DHD_ERROR(("InfoSub: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
			ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
			ltoh32(ring->base_addr.low_addr), dma_buf_len));
		DHD_ERROR(("InfoSub: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
		if (dhd->bus->is_linkdown) {
			DHD_ERROR(("InfoSub: From Shared Mem: RD and WR are invalid"
				" due to PCIe link down\r\n"));
		} else {
			dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
			dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
			DHD_ERROR(("InfoSub: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
		}
		DHD_ERROR(("InfoSub: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
	}
	ring = prot->d2hring_info_cpln;
	if (ring) {
		dma_buf_len = ring->max_items * ring->item_len;
		DHD_ERROR(("InfoCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
			ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
			ltoh32(ring->base_addr.low_addr), dma_buf_len));
		DHD_ERROR(("InfoCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
		if (dhd->bus->is_linkdown) {
			DHD_ERROR(("InfoCpl: From Shared Mem: RD and WR are invalid"
				" due to PCIe link down\r\n"));
		} else {
			dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
			dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
			DHD_ERROR(("InfoCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
		}
		DHD_ERROR(("InfoCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
	}

	ring = &prot->d2hring_tx_cpln;
	dma_buf_len = ring->max_items * ring->item_len;
	DHD_ERROR(("TxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
		ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
		ltoh32(ring->base_addr.low_addr), dma_buf_len));
	DHD_ERROR(("TxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
	if (dhd->bus->is_linkdown) {
		DHD_ERROR(("TxCpl: From Shared Mem: RD and WR are invalid"
			" due to PCIe link down\r\n"));
	} else {
		dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
		dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
		DHD_ERROR(("TxCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
	}
	DHD_ERROR(("TxCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));

	ring = &prot->d2hring_rx_cpln;
	dma_buf_len = ring->max_items * ring->item_len;
	DHD_ERROR(("RxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
		ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
		ltoh32(ring->base_addr.low_addr), dma_buf_len));
	DHD_ERROR(("RxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
	if (dhd->bus->is_linkdown) {
		DHD_ERROR(("RxCpl: From Shared Mem: RD and WR are invalid"
			" due to PCIe link down\r\n"));
	} else {
		dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
		dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
		DHD_ERROR(("RxCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
	}
	DHD_ERROR(("RxCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));

	DHD_ERROR(("%s: cur_ioctlresp_bufs_posted %d cur_event_bufs_posted %d\n",
		__FUNCTION__, prot->cur_ioctlresp_bufs_posted, prot->cur_event_bufs_posted));

	dhd_pcie_debug_info_dump(dhd);

	return 0;
}
void
dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
{
	uint32 *ptr;
	uint32 value;
	uint32 i;

	if (dhd->prot->d2h_dma_indx_wr_buf.va) {
		uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus);

		OSL_CACHE_INV((void *)dhd->prot->d2h_dma_indx_wr_buf.va,
			dhd->prot->d2h_dma_indx_wr_buf.len);

		ptr = (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va);

		bcm_bprintf(b, "\n max_tx_queues %d\n", max_h2d_queues);

		bcm_bprintf(b, "\nRPTR block H2D common rings, 0x%04x\n", ptr);
		value = ltoh32(*ptr);
		bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value);
		ptr++;
		value = ltoh32(*ptr);
		bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value);

		ptr++;
		bcm_bprintf(b, "RPTR block Flow rings , 0x%04x\n", ptr);
		for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) {
			value = ltoh32(*ptr);
			bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value);
			ptr++;
		}
	}

	if (dhd->prot->h2d_dma_indx_rd_buf.va) {
		OSL_CACHE_INV((void *)dhd->prot->h2d_dma_indx_rd_buf.va,
			dhd->prot->h2d_dma_indx_rd_buf.len);

		ptr = (uint32 *)(dhd->prot->h2d_dma_indx_rd_buf.va);

		bcm_bprintf(b, "\nWPTR block D2H common rings, 0x%04x\n", ptr);
		value = ltoh32(*ptr);
		bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value);
		ptr++;
		value = ltoh32(*ptr);
		bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value);
		ptr++;
		value = ltoh32(*ptr);
		bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value);
	}
}
uint32
dhd_prot_metadata_dbg_set(dhd_pub_t *dhd, bool val)
{
	dhd_prot_t *prot = dhd->prot;
#if DHD_DBG_SHOW_METADATA
	prot->metadata_dbg = val;
#endif
	return (uint32)prot->metadata_dbg;
}

uint32
dhd_prot_metadata_dbg_get(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;
	return (uint32)prot->metadata_dbg;
}
uint32
dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx)
{
	dhd_prot_t *prot = dhd->prot;
	if (rx)
		prot->rx_metadata_offset = (uint16)val;
	else
		prot->tx_metadata_offset = (uint16)val;
	return dhd_prot_metadatalen_get(dhd, rx);
}

uint32
dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx)
{
	dhd_prot_t *prot = dhd->prot;
	if (rx)
		return prot->rx_metadata_offset;
	return prot->tx_metadata_offset;
}
/** optimization to write "n" tx items at a time to ring */
uint32
dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val)
{
	dhd_prot_t *prot = dhd->prot;
	if (set)
		prot->txp_threshold = (uint16)val;
	val = prot->txp_threshold;
	return val;
}
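
/*
 * Rx packet chaining (DHD_RX_CHAINING): consecutive receive packets that
 * belong to the same flow (same SA/DA/priority, IPv4/IPv6 unicast) are
 * linked with PKTSETCLINK and handed to the bus layer as a single chain.
 * The chain is flushed when the flow changes, when a packet is not
 * chainable, or when DHD_PKT_CTF_MAX_CHAIN_LEN is reached.
 */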
#ifdef DHD_RX_CHAINING

static INLINE void BCMFASTPATH
dhd_rxchain_reset(rxchain_info_t *rxchain)
{
	rxchain->pkt_count = 0;
}

static void BCMFASTPATH
dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx)
{
	uint8 *eh;
	uint8 prio;
	dhd_prot_t *prot = dhd->prot;
	rxchain_info_t *rxchain = &prot->rxchain;

	ASSERT(!PKTISCHAINED(pkt));
	ASSERT(PKTCLINK(pkt) == NULL);
	ASSERT(PKTCGETATTR(pkt) == 0);

	eh = PKTDATA(dhd->osh, pkt);
	prio = IP_TOS46(eh + ETHER_HDR_LEN) >> IPV4_TOS_PREC_SHIFT;

	if (rxchain->pkt_count && !(PKT_CTF_CHAINABLE(dhd, ifidx, eh, prio, rxchain->h_sa,
		rxchain->h_da, rxchain->h_prio))) {
		/* Different flow - First release the existing chain */
		dhd_rxchain_commit(dhd);
	}

	/* For routers, with HNDCTF, link the packets using PKTSETCLINK, */
	/* so that the chain can be handed off to CTF bridge as is. */
	if (rxchain->pkt_count == 0) {
		/* First packet in chain */
		rxchain->pkthead = rxchain->pkttail = pkt;

		/* Keep a copy of ptr to ether_da, ether_sa and prio */
		rxchain->h_da = ((struct ether_header *)eh)->ether_dhost;
		rxchain->h_sa = ((struct ether_header *)eh)->ether_shost;
		rxchain->h_prio = prio;
		rxchain->ifidx = ifidx;
		rxchain->pkt_count++;
	} else {
		/* Same flow - keep chaining */
		PKTSETCLINK(rxchain->pkttail, pkt);
		rxchain->pkttail = pkt;
		rxchain->pkt_count++;
	}

	if ((dhd_rx_pkt_chainable(dhd, ifidx)) && (!ETHER_ISMULTI(rxchain->h_da)) &&
		((((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IP)) ||
		(((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IPV6)))) {
		PKTSETCHAINED(dhd->osh, pkt);
		PKTCINCRCNT(rxchain->pkthead);
		PKTCADDLEN(rxchain->pkthead, PKTLEN(dhd->osh, pkt));
	} else {
		dhd_rxchain_commit(dhd);
		return;
	}

	/* If we have hit the max chain length, dispatch the chain and reset */
	if (rxchain->pkt_count >= DHD_PKT_CTF_MAX_CHAIN_LEN) {
		dhd_rxchain_commit(dhd);
	}
}

static void BCMFASTPATH
dhd_rxchain_commit(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;
	rxchain_info_t *rxchain = &prot->rxchain;

	if (rxchain->pkt_count == 0)
		return;

	/* Release the packets to dhd_linux */
	dhd_bus_rx_frame(dhd->bus, rxchain->pkthead, rxchain->ifidx, rxchain->pkt_count);

	/* Reset the chain */
	dhd_rxchain_reset(rxchain);
}

#endif /* DHD_RX_CHAINING */
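
/*
 * Idle TX flow-ring management: on resume, a pre-initialized msgbuf_ring is
 * fetched from the flowring pool and a MSG_TYPE_FLOW_RING_RESUME request is
 * posted on the control submission ring; batch suspend posts a single
 * MSG_TYPE_FLOW_RING_SUSPEND request carrying a list of ring ids.
 */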
#ifdef IDLE_TX_FLOW_MGMT
int
dhd_prot_flow_ring_resume(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
{
	tx_idle_flowring_resume_request_t *flow_resume_rqst;
	msgbuf_ring_t *flow_ring;
	dhd_prot_t *prot = dhd->prot;
	unsigned long flags;
	uint16 alloced = 0;
	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;

	/* Fetch a pre-initialized msgbuf_ring from the flowring pool */
	flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
	if (flow_ring == NULL) {
		DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
			__FUNCTION__, flow_ring_node->flowid));
		return BCME_NOMEM;
	}

	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);

	/* Request for ctrl_ring buffer space */
	flow_resume_rqst = (tx_idle_flowring_resume_request_t *)
		dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);

	if (flow_resume_rqst == NULL) {
		dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
		DHD_ERROR(("%s: Flow resume Req flowid %d - failure ring space\n",
			__FUNCTION__, flow_ring_node->flowid));
		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
		return BCME_NOMEM;
	}

	flow_ring_node->prot_info = (void *)flow_ring;

	/* Common msg buf hdr */
	flow_resume_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_RESUME;
	flow_resume_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
	flow_resume_rqst->msg.request_id = htol32(0); /* TBD */

	flow_resume_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
	ctrl_ring->seqnum++;

	flow_resume_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
	DHD_ERROR(("%s Send Flow resume Req flow ID %d\n",
		__FUNCTION__, flow_ring_node->flowid));

	/* Update the flow_ring's WRITE index */
	if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
		dhd_prot_dma_indx_set(dhd, flow_ring->wr,
			H2D_DMA_INDX_WR_UPD, flow_ring->idx);
	} else if (IFRM_ACTIVE(dhd) && (flow_ring->idx >= BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START)) {
		dhd_prot_dma_indx_set(dhd, flow_ring->wr,
			H2D_IFRM_INDX_WR_UPD,
			(flow_ring->idx - BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START));
	} else {
		dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
			sizeof(uint16), RING_WR_UPD, flow_ring->idx);
	}

	/* update control subn ring's WR index and ring doorbell to dongle */
	dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_resume_rqst, 1);

	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);

	return BCME_OK;
} /* dhd_prot_flow_ring_resume */

int
dhd_prot_flow_ring_batch_suspend_request(dhd_pub_t *dhd, uint16 *ringid, uint16 count)
{
	tx_idle_flowring_suspend_request_t *flow_suspend_rqst;
	dhd_prot_t *prot = dhd->prot;
	unsigned long flags;
	uint16 index;
	uint16 alloced = 0;
	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;

	DHD_RING_LOCK(ring->ring_lock, flags);

	/* Request for ring buffer space */
	flow_suspend_rqst = (tx_idle_flowring_suspend_request_t *)
		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);

	if (flow_suspend_rqst == NULL) {
		DHD_RING_UNLOCK(ring->ring_lock, flags);
		DHD_ERROR(("%s: Flow suspend Req - failure ring space\n", __FUNCTION__));
		return BCME_NOMEM;
	}

	/* Common msg buf hdr */
	flow_suspend_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_SUSPEND;
	/* flow_suspend_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; */
	flow_suspend_rqst->msg.request_id = htol32(0); /* TBD */

	flow_suspend_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
	ring->seqnum++;

	/* Update flow id info */
	for (index = 0; index < count; index++)
	{
		flow_suspend_rqst->ring_id[index] = ringid[index];
	}
	flow_suspend_rqst->num = count;

	DHD_ERROR(("%s sending batch suspend!! count is %d\n", __FUNCTION__, count));

	/* update ring's WR index and ring doorbell to dongle */
	dhd_prot_ring_write_complete(dhd, ring, flow_suspend_rqst, 1);

	DHD_RING_UNLOCK(ring->ring_lock, flags);

	return BCME_OK;
}
#endif /* IDLE_TX_FLOW_MGMT */
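
/*
 * Extended trap decoding: on a dongle trap, the firmware may append a TLV
 * list (hnd_ext_trap_hdr_t) to the basic trap record. etd_trap_name() maps
 * a TLV tag to a printable name; dhd_prot_dump_extended_trap() walks the
 * list and pretty-prints each tag it recognizes.
 */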
static const char* etd_trap_name(hnd_ext_tag_trap_t tag)
{
	switch (tag)
	{
	case TAG_TRAP_SIGNATURE: return "TAG_TRAP_SIGNATURE";
	case TAG_TRAP_STACK: return "TAG_TRAP_STACK";
	case TAG_TRAP_MEMORY: return "TAG_TRAP_MEMORY";
	case TAG_TRAP_DEEPSLEEP: return "TAG_TRAP_DEEPSLEEP";
	case TAG_TRAP_PSM_WD: return "TAG_TRAP_PSM_WD";
	case TAG_TRAP_PHY: return "TAG_TRAP_PHY";
	case TAG_TRAP_BUS: return "TAG_TRAP_BUS";
	case TAG_TRAP_MAC_SUSP: return "TAG_TRAP_MAC_SUSP";
	case TAG_TRAP_BACKPLANE: return "TAG_TRAP_BACKPLANE";
	case TAG_TRAP_PCIE_Q: return "TAG_TRAP_PCIE_Q";
	case TAG_TRAP_WLC_STATE: return "TAG_TRAP_WLC_STATE";
	case TAG_TRAP_MAC_WAKE: return "TAG_TRAP_MAC_WAKE";
	case TAG_TRAP_HMAP: return "TAG_TRAP_HMAP";
	case TAG_TRAP_PHYTXERR_THRESH: return "TAG_TRAP_PHYTXERR_THRESH";
	case TAG_TRAP_HC_DATA: return "TAG_TRAP_HC_DATA";
	case TAG_TRAP_LOG_DATA: return "TAG_TRAP_LOG_DATA";
	case TAG_TRAP_CODE: return "TAG_TRAP_CODE";
	}
	return "Unknown";
}
int dhd_prot_dump_extended_trap(dhd_pub_t *dhdp, struct bcmstrbuf *b, bool raw)
{
	uint32 i;
	uint32 *ext_data;
	hnd_ext_trap_hdr_t *hdr;
	const bcm_tlv_t *tlv;
	const trap_t *tr;
	const uint32 *stack;
	const hnd_ext_trap_bp_err_t *bpe;
	uint32 raw_len;

	ext_data = dhdp->extended_trap_data;

	/* return if there is no extended trap data */
	if (!ext_data || !(dhdp->dongle_trap_data & D2H_DEV_EXT_TRAP_DATA))
	{
		bcm_bprintf(b, "%d (0x%x)", dhdp->dongle_trap_data, dhdp->dongle_trap_data);
		return BCME_OK;
	}

	bcm_bprintf(b, "Extended trap data\n");

	/* First word is original trap_data */
	bcm_bprintf(b, "trap_data = 0x%08x\n", *ext_data);
	ext_data++;

	/* Followed by the extended trap data header */
	hdr = (hnd_ext_trap_hdr_t *)ext_data;
	bcm_bprintf(b, "version: %d, len: %d\n", hdr->version, hdr->len);

	/* Dump a list of all tags found before parsing data */
	bcm_bprintf(b, "\nTags Found:\n");
	for (i = 0; i < TAG_TRAP_LAST; i++) {
		tlv = bcm_parse_tlvs(hdr->data, hdr->len, i);
		if (tlv)
			bcm_bprintf(b, "Tag: %d (%s), Length: %d\n", i, etd_trap_name(i), tlv->len);
	}

	if (raw)
	{
		raw_len = sizeof(hnd_ext_trap_hdr_t) + (hdr->len / 4) + (hdr->len % 4 ? 1 : 0);
		for (i = 0; i < raw_len; i++)
		{
			bcm_bprintf(b, "0x%08x ", ext_data[i]);
			if (i % 4 == 3)
				bcm_bprintf(b, "\n");
		}
		return BCME_OK;
	}
	/* Extract the various supported TLVs from the extended trap data */
	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_CODE);
	if (tlv)
	{
		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_CODE), tlv->len);
		bcm_bprintf(b, "ETD TYPE: %d\n", tlv->data[0]);
	}

	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
	if (tlv)
	{
		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_SIGNATURE), tlv->len);
		tr = (const trap_t *)tlv->data;

		bcm_bprintf(b, "TRAP %x: pc %x, lr %x, sp %x, cpsr %x, spsr %x\n",
			tr->type, tr->pc, tr->r14, tr->r13, tr->cpsr, tr->spsr);
		bcm_bprintf(b, " r0 %x, r1 %x, r2 %x, r3 %x, r4 %x, r5 %x, r6 %x\n",
			tr->r0, tr->r1, tr->r2, tr->r3, tr->r4, tr->r5, tr->r6);
		bcm_bprintf(b, " r7 %x, r8 %x, r9 %x, r10 %x, r11 %x, r12 %x\n",
			tr->r7, tr->r8, tr->r9, tr->r10, tr->r11, tr->r12);
	}
	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
	if (tlv)
	{
		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_STACK), tlv->len);
		stack = (const uint32 *)tlv->data;
		for (i = 0; i < (uint32)(tlv->len / 4); i++)
		{
			bcm_bprintf(b, " 0x%08x\n", *stack);
			stack++;
		}
	}
	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BACKPLANE);
	if (tlv)
	{
		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BACKPLANE), tlv->len);
		bpe = (const hnd_ext_trap_bp_err_t *)tlv->data;
		bcm_bprintf(b, " error: %x\n", bpe->error);
		bcm_bprintf(b, " coreid: %x\n", bpe->coreid);
		bcm_bprintf(b, " baseaddr: %x\n", bpe->baseaddr);
		bcm_bprintf(b, " ioctrl: %x\n", bpe->ioctrl);
		bcm_bprintf(b, " iostatus: %x\n", bpe->iostatus);
		bcm_bprintf(b, " resetctrl: %x\n", bpe->resetctrl);
		bcm_bprintf(b, " resetstatus: %x\n", bpe->resetstatus);
		bcm_bprintf(b, " errlogctrl: %x\n", bpe->errlogctrl);
		bcm_bprintf(b, " errlogdone: %x\n", bpe->errlogdone);
		bcm_bprintf(b, " errlogstatus: %x\n", bpe->errlogstatus);
		bcm_bprintf(b, " errlogaddrlo: %x\n", bpe->errlogaddrlo);
		bcm_bprintf(b, " errlogaddrhi: %x\n", bpe->errlogaddrhi);
		bcm_bprintf(b, " errlogid: %x\n", bpe->errlogid);
		bcm_bprintf(b, " errloguser: %x\n", bpe->errloguser);
		bcm_bprintf(b, " errlogflags: %x\n", bpe->errlogflags);
	}
	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MEMORY);
	if (tlv)
	{
		const hnd_ext_trap_heap_err_t* hme;

		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MEMORY), tlv->len);
		hme = (const hnd_ext_trap_heap_err_t *)tlv->data;
		bcm_bprintf(b, " arena total: %d\n", hme->arena_total);
		bcm_bprintf(b, " heap free: %d\n", hme->heap_free);
		bcm_bprintf(b, " heap in use: %d\n", hme->heap_inuse);
		bcm_bprintf(b, " mf count: %d\n", hme->mf_count);
		bcm_bprintf(b, " stack LWM: %x\n", hme->stack_lwm);

		bcm_bprintf(b, " Histogram:\n");
		for (i = 0; i < (HEAP_HISTOGRAM_DUMP_LEN * 2); i += 2) {
			if (hme->heap_histogm[i] == 0xfffe)
				bcm_bprintf(b, " Others\t%d\t?\n", hme->heap_histogm[i + 1]);
			else if (hme->heap_histogm[i] == 0xffff)
				bcm_bprintf(b, " >= 256K\t%d\t?\n", hme->heap_histogm[i + 1]);
			else
				bcm_bprintf(b, " %d\t%d\t%d\n", hme->heap_histogm[i] << 2,
					hme->heap_histogm[i + 1], (hme->heap_histogm[i] << 2)
					* hme->heap_histogm[i + 1]);
		}

		bcm_bprintf(b, " Max free block: %d\n", hme->max_sz_free_blk[0] << 2);
		for (i = 1; i < HEAP_MAX_SZ_BLKS_LEN; i++) {
			bcm_bprintf(b, " Next lgst free block: %d\n", hme->max_sz_free_blk[i] << 2);
		}
	}
	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PCIE_Q);
	if (tlv)
	{
		const hnd_ext_trap_pcie_mem_err_t* pqme;

		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PCIE_Q), tlv->len);
		pqme = (const hnd_ext_trap_pcie_mem_err_t *)tlv->data;
		bcm_bprintf(b, " d2h queue len: %x\n", pqme->d2h_queue_len);
		bcm_bprintf(b, " d2h req queue len: %x\n", pqme->d2h_req_queue_len);
	}
	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_WLC_STATE);
	if (tlv)
	{
		const hnd_ext_trap_wlc_mem_err_t* wsme;

		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_WLC_STATE), tlv->len);
		wsme = (const hnd_ext_trap_wlc_mem_err_t *)tlv->data;
		bcm_bprintf(b, " instance: %d\n", wsme->instance);
		bcm_bprintf(b, " associated: %d\n", wsme->associated);
		bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt);
		bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt);
		bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]);
		bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]);
		bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[2]);
		bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[3]);

		if (tlv->len >= (sizeof(*wsme) * 2)) {
			wsme++;
			bcm_bprintf(b, "\n instance: %d\n", wsme->instance);
			bcm_bprintf(b, " associated: %d\n", wsme->associated);
			bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt);
			bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt);
			bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]);
			bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]);
			bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[2]);
			bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[3]);
		}
	}
	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PHY);
	if (tlv)
	{
		const hnd_ext_trap_phydbg_t* phydbg;
		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PHY), tlv->len);
		phydbg = (const hnd_ext_trap_phydbg_t *)tlv->data;
		bcm_bprintf(b, " err: 0x%x\n", phydbg->err);
		bcm_bprintf(b, " RxFeStatus: 0x%x\n", phydbg->RxFeStatus);
		bcm_bprintf(b, " TxFIFOStatus0: 0x%x\n", phydbg->TxFIFOStatus0);
		bcm_bprintf(b, " TxFIFOStatus1: 0x%x\n", phydbg->TxFIFOStatus1);
		bcm_bprintf(b, " RfseqMode: 0x%x\n", phydbg->RfseqMode);
		bcm_bprintf(b, " RfseqStatus0: 0x%x\n", phydbg->RfseqStatus0);
		bcm_bprintf(b, " RfseqStatus1: 0x%x\n", phydbg->RfseqStatus1);
		bcm_bprintf(b, " RfseqStatus_Ocl: 0x%x\n", phydbg->RfseqStatus_Ocl);
		bcm_bprintf(b, " RfseqStatus_Ocl1: 0x%x\n", phydbg->RfseqStatus_Ocl1);
		bcm_bprintf(b, " OCLControl1: 0x%x\n", phydbg->OCLControl1);
		bcm_bprintf(b, " TxError: 0x%x\n", phydbg->TxError);
		bcm_bprintf(b, " bphyTxError: 0x%x\n", phydbg->bphyTxError);
		bcm_bprintf(b, " TxCCKError: 0x%x\n", phydbg->TxCCKError);
		bcm_bprintf(b, " TxCtrlWrd0: 0x%x\n", phydbg->TxCtrlWrd0);
		bcm_bprintf(b, " TxCtrlWrd1: 0x%x\n", phydbg->TxCtrlWrd1);
		bcm_bprintf(b, " TxCtrlWrd2: 0x%x\n", phydbg->TxCtrlWrd2);
		bcm_bprintf(b, " TxLsig0: 0x%x\n", phydbg->TxLsig0);
		bcm_bprintf(b, " TxLsig1: 0x%x\n", phydbg->TxLsig1);
		bcm_bprintf(b, " TxVhtSigA10: 0x%x\n", phydbg->TxVhtSigA10);
		bcm_bprintf(b, " TxVhtSigA11: 0x%x\n", phydbg->TxVhtSigA11);
		bcm_bprintf(b, " TxVhtSigA20: 0x%x\n", phydbg->TxVhtSigA20);
		bcm_bprintf(b, " TxVhtSigA21: 0x%x\n", phydbg->TxVhtSigA21);
		bcm_bprintf(b, " txPktLength: 0x%x\n", phydbg->txPktLength);
		bcm_bprintf(b, " txPsdulengthCtr: 0x%x\n", phydbg->txPsdulengthCtr);
		bcm_bprintf(b, " gpioClkControl: 0x%x\n", phydbg->gpioClkControl);
		bcm_bprintf(b, " gpioSel: 0x%x\n", phydbg->gpioSel);
		bcm_bprintf(b, " pktprocdebug: 0x%x\n", phydbg->pktprocdebug);
		for (i = 0; i < 3; i++)
			bcm_bprintf(b, " gpioOut[%d]: 0x%x\n", i, phydbg->gpioOut[i]);
	}
	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PSM_WD);
	if (tlv)
	{
		const hnd_ext_trap_psmwd_t* psmwd;
		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PSM_WD), tlv->len);
		psmwd = (const hnd_ext_trap_psmwd_t *)tlv;
		bcm_bprintf(b, " version: 0x%x\n", psmwd->version);
		bcm_bprintf(b, " maccontrol: 0x%x\n", psmwd->i32_maccontrol);
		bcm_bprintf(b, " maccommand: 0x%x\n", psmwd->i32_maccommand);
		bcm_bprintf(b, " macintstatus: 0x%x\n", psmwd->i32_macintstatus);
		bcm_bprintf(b, " phydebug: 0x%x\n", psmwd->i32_phydebug);
		bcm_bprintf(b, " clk_ctl_st: 0x%x\n", psmwd->i32_clk_ctl_st);
		for (i = 0; i < 3; i++)
			bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, psmwd->i32_psmdebug[i]);
		bcm_bprintf(b, " gated clock en: 0x%x\n", psmwd->i16_0x1a8);
		bcm_bprintf(b, " Rcv Fifo Ctrl: 0x%x\n", psmwd->i16_0x406);
		bcm_bprintf(b, " Rx ctrl 1: 0x%x\n", psmwd->i16_0x408);
		bcm_bprintf(b, " Rxe Status 1: 0x%x\n", psmwd->i16_0x41a);
		bcm_bprintf(b, " Rxe Status 2: 0x%x\n", psmwd->i16_0x41c);
		bcm_bprintf(b, " rcv wrd count 0: 0x%x\n", psmwd->i16_0x424);
		bcm_bprintf(b, " rcv wrd count 1: 0x%x\n", psmwd->i16_0x426);
		bcm_bprintf(b, " RCV_LFIFO_STS: 0x%x\n", psmwd->i16_0x456);
		bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", psmwd->i16_0x480);
		bcm_bprintf(b, " PSM BRC: 0x%x\n", psmwd->i16_0x490);
		bcm_bprintf(b, " TXE CTRL: 0x%x\n", psmwd->i16_0x500);
		bcm_bprintf(b, " TXE Status: 0x%x\n", psmwd->i16_0x50e);
		bcm_bprintf(b, " TXE_xmtdmabusy: 0x%x\n", psmwd->i16_0x55e);
		bcm_bprintf(b, " TXE_XMTfifosuspflush: 0x%x\n", psmwd->i16_0x566);
		bcm_bprintf(b, " IFS Stat: 0x%x\n", psmwd->i16_0x690);
		bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", psmwd->i16_0x692);
		bcm_bprintf(b, " IFS_TX_DUR: 0x%x\n", psmwd->i16_0x694);
		bcm_bprintf(b, " SLow_CTL: 0x%x\n", psmwd->i16_0x6a0);
		bcm_bprintf(b, " TXE_AQM fifo Ready: 0x%x\n", psmwd->i16_0x838);
		bcm_bprintf(b, " Dagg ctrl: 0x%x\n", psmwd->i16_0x8c0);
		bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", psmwd->shm_prewds_cnt);
		bcm_bprintf(b, " shm_txtplufl_cnt: 0x%x\n", psmwd->shm_txtplufl_cnt);
		bcm_bprintf(b, " shm_txphyerr_cnt: 0x%x\n", psmwd->shm_txphyerr_cnt);
	}
	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MAC_SUSP);
	if (tlv)
	{
		const hnd_ext_trap_macsusp_t* macsusp;
		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_SUSP), tlv->len);
		macsusp = (const hnd_ext_trap_macsusp_t *)tlv;
		bcm_bprintf(b, " version: %d\n", macsusp->version);
		bcm_bprintf(b, " trap_reason: %d\n", macsusp->trap_reason);
		bcm_bprintf(b, " maccontrol: 0x%x\n", macsusp->i32_maccontrol);
		bcm_bprintf(b, " maccommand: 0x%x\n", macsusp->i32_maccommand);
		bcm_bprintf(b, " macintstatus: 0x%x\n", macsusp->i32_macintstatus);
		for (i = 0; i < 4; i++)
			bcm_bprintf(b, " phydebug[%d]: 0x%x\n", i, macsusp->i32_phydebug[i]);
		for (i = 0; i < 8; i++)
			bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, macsusp->i32_psmdebug[i]);
		bcm_bprintf(b, " Rxe Status_1: 0x%x\n", macsusp->i16_0x41a);
		bcm_bprintf(b, " Rxe Status_2: 0x%x\n", macsusp->i16_0x41c);
		bcm_bprintf(b, " PSM BRC: 0x%x\n", macsusp->i16_0x490);
		bcm_bprintf(b, " TXE Status: 0x%x\n", macsusp->i16_0x50e);
		bcm_bprintf(b, " TXE xmtdmabusy: 0x%x\n", macsusp->i16_0x55e);
		bcm_bprintf(b, " TXE XMTfifosuspflush: 0x%x\n", macsusp->i16_0x566);
		bcm_bprintf(b, " IFS Stat: 0x%x\n", macsusp->i16_0x690);
		bcm_bprintf(b, " IFS MEDBUSY CTR: 0x%x\n", macsusp->i16_0x692);
		bcm_bprintf(b, " IFS TX DUR: 0x%x\n", macsusp->i16_0x694);
		bcm_bprintf(b, " WEP CTL: 0x%x\n", macsusp->i16_0x7c0);
		bcm_bprintf(b, " TXE AQM fifo Ready: 0x%x\n", macsusp->i16_0x838);
		bcm_bprintf(b, " MHP status: 0x%x\n", macsusp->i16_0x880);
		bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", macsusp->shm_prewds_cnt);
		bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macsusp->shm_ucode_dbgst);
	}
	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MAC_WAKE);
	if (tlv)
	{
		const hnd_ext_trap_macenab_t* macwake;
		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_WAKE), tlv->len);
		macwake = (const hnd_ext_trap_macenab_t *)tlv;
		bcm_bprintf(b, " version: 0x%x\n", macwake->version);
		bcm_bprintf(b, " trap_reason: 0x%x\n", macwake->trap_reason);
		bcm_bprintf(b, " maccontrol: 0x%x\n", macwake->i32_maccontrol);
		bcm_bprintf(b, " maccommand: 0x%x\n", macwake->i32_maccommand);
		bcm_bprintf(b, " macintstatus: 0x%x\n", macwake->i32_macintstatus);
		for (i = 0; i < 8; i++)
			bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, macwake->i32_psmdebug[i]);
		bcm_bprintf(b, " clk_ctl_st: 0x%x\n", macwake->i32_clk_ctl_st);
		bcm_bprintf(b, " powerctl: 0x%x\n", macwake->i32_powerctl);
		bcm_bprintf(b, " gated clock en: 0x%x\n", macwake->i16_0x1a8);
		bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", macwake->i16_0x480);
		bcm_bprintf(b, " PSM BRC: 0x%x\n", macwake->i16_0x490);
		bcm_bprintf(b, " TSF CTL: 0x%x\n", macwake->i16_0x600);
		bcm_bprintf(b, " IFS Stat: 0x%x\n", macwake->i16_0x690);
		bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", macwake->i16_0x692);
		bcm_bprintf(b, " Slow_CTL: 0x%x\n", macwake->i16_0x6a0);
		bcm_bprintf(b, " Slow_FRAC: 0x%x\n", macwake->i16_0x6a6);
		bcm_bprintf(b, " fast power up delay: 0x%x\n", macwake->i16_0x6a8);
		bcm_bprintf(b, " Slow_PER: 0x%x\n", macwake->i16_0x6aa);
		bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macwake->shm_ucode_dbgst);
	}
	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BUS);
	if (tlv)
	{
		const bcm_dngl_pcie_hc_t* hc;
		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BUS), tlv->len);
		hc = (const bcm_dngl_pcie_hc_t *)tlv->data;
		bcm_bprintf(b, " version: 0x%x\n", hc->version);
		bcm_bprintf(b, " reserved: 0x%x\n", hc->reserved);
		bcm_bprintf(b, " pcie_err_ind_type: 0x%x\n", hc->pcie_err_ind_type);
		bcm_bprintf(b, " pcie_flag: 0x%x\n", hc->pcie_flag);
		bcm_bprintf(b, " pcie_control_reg: 0x%x\n", hc->pcie_control_reg);
		for (i = 0; i < HC_PCIEDEV_CONFIG_REGLIST_MAX; i++)
			bcm_bprintf(b, " pcie_config_regs[%d]: 0x%x\n", i, hc->pcie_config_regs[i]);
	}
	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_HMAP);
	if (tlv)
	{
		const pcie_hmapviolation_t* hmap;
		hmap = (const pcie_hmapviolation_t *)tlv->data;
		bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_HMAP), tlv->len);
		bcm_bprintf(b, " HMAP Vio Addr Low: 0x%x\n", hmap->hmap_violationaddr_lo);
		bcm_bprintf(b, " HMAP Vio Addr Hi: 0x%x\n", hmap->hmap_violationaddr_hi);
		bcm_bprintf(b, " HMAP Vio Info: 0x%x\n", hmap->hmap_violation_info);
	}

	return BCME_OK;
}
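
/*
 * Host timestamp request: copy the caller's TLV payload into the
 * pre-allocated hostts_req_buf and post a MSG_TYPE_HOSTTIMSTAMP work item
 * on the control submission ring. Only one request may be outstanding at a
 * time, tracked via hostts_req_buf_inuse.
 */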
int
dhd_prot_send_host_timestamp(dhd_pub_t *dhdp, uchar *tlvs, uint16 tlv_len,
	uint16 seqnum, uint16 xt_id)
{
	dhd_prot_t *prot = dhdp->prot;
	host_timestamp_msg_t *ts_req;
	unsigned long flags;
	uint16 alloced = 0;
	void *ts_tlv_buf;
	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;

	if ((tlvs == NULL) || (tlv_len == 0)) {
		DHD_ERROR(("%s: argument error tlv: %p, tlv_len %d\n",
			__FUNCTION__, tlvs, tlv_len));
		return -1;
	}

	DHD_RING_LOCK(ctrl_ring->ring_lock, flags);

	/* if Host TS req already pending go away */
	if (prot->hostts_req_buf_inuse == TRUE) {
		DHD_ERROR(("one host TS request already pending at device\n"));
		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
		return -1;
	}

	/* Request for cbuf space */
	ts_req = (host_timestamp_msg_t *)dhd_prot_alloc_ring_space(dhdp, ctrl_ring,
		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced, FALSE);
	if (ts_req == NULL) {
		DHD_ERROR(("couldn't allocate space on msgring to send host TS request\n"));
		DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
		return -1;
	}

	/* Common msg buf hdr */
	ts_req->msg.msg_type = MSG_TYPE_HOSTTIMSTAMP;
	ts_req->msg.if_id = 0;
	ts_req->msg.flags = ctrl_ring->current_phase;
	ts_req->msg.request_id = DHD_H2D_HOSTTS_REQ_PKTID;

	ts_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
	ctrl_ring->seqnum++;

	ts_req->xt_id = xt_id;
	ts_req->seqnum = seqnum;
	/* populate TS req buffer info */
	ts_req->input_data_len = htol16(tlv_len);
	ts_req->host_buf_addr.high = htol32(PHYSADDRHI(prot->hostts_req_buf.pa));
	ts_req->host_buf_addr.low = htol32(PHYSADDRLO(prot->hostts_req_buf.pa));
	/* copy ioct payload */
	ts_tlv_buf = (void *) prot->hostts_req_buf.va;
	prot->hostts_req_buf_inuse = TRUE;
	memcpy(ts_tlv_buf, tlvs, tlv_len);

	OSL_CACHE_FLUSH((void *) prot->hostts_req_buf.va, tlv_len);

	if (ISALIGNED(ts_tlv_buf, DMA_ALIGN_LEN) == FALSE) {
		DHD_ERROR(("host TS req buffer address unaligned !!!!! \n"));
	}

	DHD_CTL(("submitted Host TS request request_id %d, data_len %d, tx_id %d, seq %d\n",
		ts_req->msg.request_id, ts_req->input_data_len,
		ts_req->xt_id, ts_req->seqnum));

	/* upd wrt ptr and raise interrupt */
	dhd_prot_ring_write_complete(dhdp, ctrl_ring, ts_req,
		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);

	DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);

	return 0;
} /* dhd_prot_send_host_timestamp */
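
/*
 * Simple get/set toggles for timestamp logging and per-packet policy flags.
 * Each follows the same pattern: update the field only when 'set' is TRUE,
 * and always return the current value.
 */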
bool
dhd_prot_data_path_tx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set)
{
	if (set)
		dhd->prot->tx_ts_log_enabled = enable;

	return dhd->prot->tx_ts_log_enabled;
}

bool
dhd_prot_data_path_rx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set)
{
	if (set)
		dhd->prot->rx_ts_log_enabled = enable;

	return dhd->prot->rx_ts_log_enabled;
}

bool
dhd_prot_pkt_noretry(dhd_pub_t *dhd, bool enable, bool set)
{
	if (set)
		dhd->prot->no_retry = enable;

	return dhd->prot->no_retry;
}

bool
dhd_prot_pkt_noaggr(dhd_pub_t *dhd, bool enable, bool set)
{
	if (set)
		dhd->prot->no_aggr = enable;

	return dhd->prot->no_aggr;
}

bool
dhd_prot_pkt_fixed_rate(dhd_pub_t *dhd, bool enable, bool set)
{
	if (set)
		dhd->prot->fixed_rate = enable;

	return dhd->prot->fixed_rate;
}
#endif /* BCMPCIE */
void
dhd_prot_dma_indx_free(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;

	dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
	dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
}
void
dhd_msgbuf_delay_post_ts_bufs(dhd_pub_t *dhd)
{
	if (dhd->prot->max_tsbufpost > 0)
		dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
}
static void BCMFASTPATH
dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void* buf)
{
	DHD_ERROR(("Timesync feature not compiled in but GOT FW TS message\n"));
}
uint16
dhd_prot_get_ioctl_trans_id(dhd_pub_t *dhdp)
{
	return dhdp->prot->ioctl_trans_id;
}