2 * Neighbor Awareness Networking
4 * Copyright (C) 1999-2019, Broadcom.
6 * Unless you and Broadcom execute a separate written software license
7 * agreement governing use of this software, this software is licensed to you
8 * under the terms of the GNU General Public License version 2 (the "GPL"),
9 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10 * following added to such license:
12 * As a special exception, the copyright holders of this software give you
13 * permission to link this software with independent modules, and to copy and
14 * distribute the resulting executable under terms of your choice, provided that
15 * you also meet, for each linked independent module, the terms and conditions of
16 * the license of that module. An independent module is a module which is not
17 * derived from this software. The special exception does not apply to any
18 * modifications of the software.
20 * Notwithstanding the above, under no circumstances may you combine this
21 * software in any way with any other Broadcom software provided under a license
22 * other than the GPL, without Broadcom's express prior written consent.
24 * <<Broadcom-WL-IPTag/Open:>>
26 * $Id: wl_cfgnan.c 759402 2018-04-25 10:01:49Z $
31 #include <bcmendian.h>
32 #include <bcmwifi_channels.h>
36 #include <wl_cfg80211.h>
37 #include <wl_android.h>
38 #include <wl_cfgnan.h>
40 #include <dngl_stats.h>
42 #include <wl_cfgvendor.h>
44 #include <wl_cfgp2p.h>
/* Origin markers for a NAN ranging trigger: initiated from a host command
 * vs. raised from a firmware event (see the range_req parameter of
 * wl_cfgnan_trigger_ranging()).
 */
46 #define NAN_RANGE_REQ_CMD 0
47 #define NAN_RANGE_REQ_EVNT 1
/* NOTE(review): presumably the retry bound when generating a randomized NAN
 * discovery MAC address -- not referenced in this chunk, confirm against the
 * full file.
 */
48 #define NAN_RAND_MAC_RETRIES 10
/* NOTE(review): appears to be a slack margin (in ms) applied around the scan
 * dwell time -- not referenced in this chunk, confirm against the full file.
 */
49 #define NAN_SCAN_DWELL_TIME_DELTA_MS 10
51 #ifdef WL_NAN_DISC_CACHE
/* Stores one discovery result in the driver-side cache (cache entry layout
 * is defined elsewhere in the file).
 */
52 static int wl_cfgnan_cache_disc_result(struct bcm_cfg80211
*cfg
, void * data
);
/* Removes the cached discovery result that matches a local subscriber id. */
53 static int wl_cfgnan_remove_disc_result(struct bcm_cfg80211
* cfg
, uint8 local_subid
);
/* Looks up a cached discovery result by remote publisher id and peer MAC;
 * returns NULL semantics are defined at the (not visible) definition.
 */
54 static nan_disc_result_cache
* wl_cfgnan_get_disc_result(struct bcm_cfg80211
*cfg
,
55 uint8 remote_pubid
, struct ether_addr
*peer
);
56 #endif /* WL_NAN_DISC_CACHE */
/* Sets or clears the tracking bit for the given NAN datapath id in driver
 * state, depending on 'enable'.
 */
57 static void wl_cfgnan_update_dp_mask(struct bcm_cfg80211
*cfg
, bool enable
, u8 nan_dp_id
);
/* NOTE(review): presumably programs the NAN interface address -- body not
 * visible in this chunk, confirm against the full file.
 */
59 static int wl_cfgnan_set_if_addr(struct bcm_cfg80211
*cfg
);
61 static const char *nan_role_to_str(u8 role
)
65 C2S(WL_NAN_ROLE_NON_MASTER_NON_SYNC
)
66 C2S(WL_NAN_ROLE_NON_MASTER_SYNC
)
67 C2S(WL_NAN_ROLE_MASTER
)
68 C2S(WL_NAN_ROLE_ANCHOR_MASTER
)
70 return "WL_NAN_ROLE_UNKNOWN";
74 static const char *nan_event_to_str(u16 cmd
)
77 C2S(WL_NAN_EVENT_START
)
78 C2S(WL_NAN_EVENT_DISCOVERY_RESULT
)
79 C2S(WL_NAN_EVENT_TERMINATED
)
80 C2S(WL_NAN_EVENT_RECEIVE
)
81 C2S(WL_NAN_EVENT_MERGE
)
82 C2S(WL_NAN_EVENT_STOP
)
83 C2S(WL_NAN_EVENT_PEER_DATAPATH_IND
)
84 C2S(WL_NAN_EVENT_DATAPATH_ESTB
)
85 C2S(WL_NAN_EVENT_SDF_RX
)
86 C2S(WL_NAN_EVENT_DATAPATH_END
)
87 C2S(WL_NAN_EVENT_RNG_REQ_IND
)
88 C2S(WL_NAN_EVENT_RNG_RPT_IND
)
89 C2S(WL_NAN_EVENT_RNG_TERM_IND
)
91 C2S(WL_NAN_EVENT_INVALID
)
94 return "WL_NAN_EVENT_UNKNOWN";
/* Sends a batched NAN iovar buffer (nan_buf) to the firmware; the per-command
 * firmware status is returned through *status and the raw response is written
 * into resp_buf (callers in this file pass &status and a NAN_IOCTL_BUF_SIZE
 * buffer, then check both the return code and *status).
 */
98 static int wl_cfgnan_execute_ioctl(struct net_device
*ndev
,
99 struct bcm_cfg80211
*cfg
, bcm_iov_batch_buf_t
*nan_buf
,
100 uint16 nan_buf_size
, uint32
*status
, uint8
*resp_buf
,
101 uint16 resp_buf_len
);
102 #ifdef WL_NAN_DISC_CACHE
103 /* ranging quest and response iovar handler */
/* NOTE(review): range_req presumably takes NAN_RANGE_REQ_CMD or
 * NAN_RANGE_REQ_EVNT to mark the trigger source -- confirm at the definition.
 */
104 static int wl_cfgnan_trigger_ranging(struct net_device
*ndev
,
105 struct bcm_cfg80211
*cfg
, void *event_data
, nan_svc_info_t
*svc
, uint8 range_req
);
106 #endif /* WL_NAN_DISC_CACHE */
/* NOTE(review): the return-type/storage-class line of this forward
 * declaration (original line 107) is missing from this extraction; the name
 * suggests it delivers a NAN-stop event upward -- confirm against the full
 * source before relying on either.
 */
108 wl_cfgnan_send_stop_event(nan_event_data_t
*nan_event_data
, struct bcm_cfg80211
*cfg
);
/* Allocates a free NAN service instance id out of the
 * cfg->nancfg.svc_inst_id_mask bitmap and returns it via *p_inst_id.
 * NOTE(review): several original lines are missing from this extraction
 * (return type, braces, the statement that assigns *p_inst_id from the loop
 * index, the returns) -- the commentary below covers only the visible lines.
 */
110 wl_cfgnan_generate_inst_id(struct bcm_cfg80211
*cfg
, uint8
*p_inst_id
)
/* Reject a NULL output pointer. */
114 if (p_inst_id
== NULL
) {
115 WL_ERR(("Invalid arguments\n"));
/* When the remembered search-start index has hit NAN_ID_MAX, wrap it back
 * to 0 so previously freed ids can be reused.
 */
120 if (cfg
->nancfg
.inst_id_start
== NAN_ID_MAX
) {
121 WL_ERR(("Consumed all IDs, resetting the counter\n"));
122 cfg
->nancfg
.inst_id_start
= 0;
/* Scan the bitmap from the remembered start index for the first clear bit. */
125 for (i
= cfg
->nancfg
.inst_id_start
; i
< NAN_ID_MAX
; i
++) {
126 if (isclr(cfg
->nancfg
.svc_inst_id_mask
, i
)) {
/* Claim bit i; the (missing) next original line presumably derives
 * *p_inst_id from i -- confirm against the full file.
 */
127 setbit(cfg
->nancfg
.svc_inst_id_mask
, i
);
/* Remember the allocated id so the next search resumes after it. */
129 cfg
->nancfg
.inst_id_start
= *p_inst_id
;
130 WL_DBG(("Instance ID=%d\n", *p_inst_id
));
/* Loop exhausted without finding a clear bit: all ids are in use. */
134 WL_ERR(("Allocated maximum IDs\n"));
135 ret
= BCME_NORESOURCE
;
/* Releases a previously allocated service instance id by clearing its bit in
 * cfg->nancfg.svc_inst_id_mask.
 * NOTE(review): clrbit() uses inst_id-1, which implies ids handed out by
 * wl_cfgnan_generate_inst_id() are 1-based (bit index + 1); the assigning
 * line is missing from this extraction, so confirm against the full file.
 */
141 wl_cfgnan_remove_inst_id(struct bcm_cfg80211
*cfg
, uint8 inst_id
)
144 WL_DBG(("%s: Removing svc instance id %d\n", __FUNCTION__
, inst_id
));
145 clrbit(cfg
->nancfg
.svc_inst_id_mask
, inst_id
-1);
148 s32
wl_cfgnan_parse_sdea_data(osl_t
*osh
, const uint8
*p_attr
,
149 uint16 len
, nan_event_data_t
*tlv_data
)
151 const wifi_nan_svc_desc_ext_attr_t
*nan_svc_desc_ext_attr
= NULL
;
155 /* service descriptor ext attributes */
156 nan_svc_desc_ext_attr
= (const wifi_nan_svc_desc_ext_attr_t
*)p_attr
;
159 WL_TRACE(("> attr id: 0x%02x\n", nan_svc_desc_ext_attr
->id
));
161 /* attribute length */
162 WL_TRACE(("> attr len: 0x%x\n", nan_svc_desc_ext_attr
->len
));
164 tlv_data
->sde_control_flag
= nan_svc_desc_ext_attr
->control
;
165 offset
= sizeof(*nan_svc_desc_ext_attr
);
167 WL_ERR(("Invalid event buffer len\n"));
168 ret
= BCME_BUFTOOSHORT
;
174 if (tlv_data
->sde_control_flag
& NAN_SC_RANGE_LIMITED
) {
175 WL_TRACE(("> svc_control: range limited present\n"));
177 if (tlv_data
->sde_control_flag
& NAN_SDE_CF_SVC_UPD_IND_PRESENT
) {
178 WL_TRACE(("> svc_control: sdea svc specific info present\n"));
179 tlv_data
->sde_svc_info
.dlen
= (p_attr
[1] | (p_attr
[2] << 8));
180 WL_TRACE(("> sdea svc info len: 0x%02x\n", tlv_data
->sde_svc_info
.dlen
));
181 if (!tlv_data
->sde_svc_info
.dlen
||
182 tlv_data
->sde_svc_info
.dlen
> NAN_MAX_SERVICE_SPECIFIC_INFO_LEN
) {
183 /* must be able to handle null msg which is not error */
184 tlv_data
->sde_svc_info
.dlen
= 0;
185 WL_ERR(("data length is invalid\n"));
190 if (tlv_data
->sde_svc_info
.dlen
> 0) {
191 tlv_data
->sde_svc_info
.data
= MALLOCZ(osh
, tlv_data
->sde_svc_info
.dlen
);
192 if (!tlv_data
->sde_svc_info
.data
) {
193 WL_ERR(("%s: memory allocation failed\n", __FUNCTION__
));
194 tlv_data
->sde_svc_info
.dlen
= 0;
198 /* advance read pointer, consider sizeof of Service Update Indicator */
199 offset
= sizeof(tlv_data
->sde_svc_info
.dlen
) - 1;
201 WL_ERR(("Invalid event buffer len\n"));
202 ret
= BCME_BUFTOOSHORT
;
207 memcpy(tlv_data
->sde_svc_info
.data
, p_attr
, tlv_data
->sde_svc_info
.dlen
);
209 /* must be able to handle null msg which is not error */
210 tlv_data
->sde_svc_info
.dlen
= 0;
211 WL_DBG(("%s: sdea svc info length is zero, null info data\n",
217 if (tlv_data
->sde_svc_info
.data
) {
218 MFREE(osh
, tlv_data
->sde_svc_info
.data
,
219 tlv_data
->sde_svc_info
.dlen
);
220 tlv_data
->sde_svc_info
.data
= NULL
;
223 WL_DBG(("Parse SDEA event data, status = %d\n", ret
));
228 * This attribute contains some mandatory fields and some optional fields
229 * depending on the content of the service discovery request.
232 wl_cfgnan_parse_sda_data(osl_t
*osh
, const uint8
*p_attr
,
233 uint16 len
, nan_event_data_t
*tlv_data
)
235 uint8 svc_control
= 0, offset
= 0;
237 const wifi_nan_svc_descriptor_attr_t
*nan_svc_desc_attr
= NULL
;
239 /* service descriptor attributes */
240 nan_svc_desc_attr
= (const wifi_nan_svc_descriptor_attr_t
*)p_attr
;
242 WL_TRACE(("> attr id: 0x%02x\n", nan_svc_desc_attr
->id
));
244 /* attribute length */
245 WL_TRACE(("> attr len: 0x%x\n", nan_svc_desc_attr
->len
));
248 memcpy(tlv_data
->svc_name
, nan_svc_desc_attr
->svc_hash
, NAN_SVC_HASH_LEN
);
249 WL_TRACE(("> svc_hash_name: " MACDBG
"\n", MAC2STRDBG(tlv_data
->svc_name
)));
251 /* local instance ID */
252 tlv_data
->local_inst_id
= nan_svc_desc_attr
->instance_id
;
253 WL_TRACE(("> local instance id: 0x%02x\n", tlv_data
->local_inst_id
));
255 /* requestor instance ID */
256 tlv_data
->requestor_id
= nan_svc_desc_attr
->requestor_id
;
257 WL_TRACE(("> requestor id: 0x%02x\n", tlv_data
->requestor_id
));
259 /* service control */
260 svc_control
= nan_svc_desc_attr
->svc_control
;
261 if ((svc_control
& NAN_SVC_CONTROL_TYPE_MASK
) == NAN_SC_PUBLISH
) {
262 WL_TRACE(("> Service control type: NAN_SC_PUBLISH\n"));
263 } else if ((svc_control
& NAN_SVC_CONTROL_TYPE_MASK
) == NAN_SC_SUBSCRIBE
) {
264 WL_TRACE(("> Service control type: NAN_SC_SUBSCRIBE\n"));
265 } else if ((svc_control
& NAN_SVC_CONTROL_TYPE_MASK
) == NAN_SC_FOLLOWUP
) {
266 WL_TRACE(("> Service control type: NAN_SC_FOLLOWUP\n"));
268 offset
= sizeof(*nan_svc_desc_attr
);
270 WL_ERR(("Invalid event buffer len\n"));
271 ret
= BCME_BUFTOOSHORT
;
279 * must be in order following by service descriptor attribute format
283 if (svc_control
& NAN_SC_BINDING_BITMAP_PRESENT
) {
285 WL_TRACE(("> svc_control: binding bitmap present\n"));
287 /* Copy binding bitmap */
288 memcpy(&bitmap
, p_attr
, NAN_BINDING_BITMAP_LEN
);
289 WL_TRACE(("> sc binding bitmap: 0x%04x\n", bitmap
));
291 if (NAN_BINDING_BITMAP_LEN
> len
) {
292 WL_ERR(("Invalid event buffer len\n"));
293 ret
= BCME_BUFTOOSHORT
;
296 p_attr
+= NAN_BINDING_BITMAP_LEN
;
297 len
-= NAN_BINDING_BITMAP_LEN
;
300 /* matching filter */
301 if (svc_control
& NAN_SC_MATCHING_FILTER_PRESENT
) {
302 WL_TRACE(("> svc_control: matching filter present\n"));
304 tlv_data
->tx_match_filter
.dlen
= *p_attr
++;
305 WL_TRACE(("> matching filter len: 0x%02x\n",
306 tlv_data
->tx_match_filter
.dlen
));
308 if (!tlv_data
->tx_match_filter
.dlen
||
309 tlv_data
->tx_match_filter
.dlen
> MAX_MATCH_FILTER_LEN
) {
310 tlv_data
->tx_match_filter
.dlen
= 0;
311 WL_ERR(("tx match filter length is invalid\n"));
315 tlv_data
->tx_match_filter
.data
=
316 MALLOCZ(osh
, tlv_data
->tx_match_filter
.dlen
);
317 if (!tlv_data
->tx_match_filter
.data
) {
318 WL_ERR(("%s: memory allocation failed\n", __FUNCTION__
));
319 tlv_data
->tx_match_filter
.dlen
= 0;
323 memcpy(tlv_data
->tx_match_filter
.data
, p_attr
,
324 tlv_data
->tx_match_filter
.dlen
);
326 /* advance read pointer */
327 offset
= tlv_data
->tx_match_filter
.dlen
;
329 WL_ERR(("Invalid event buffer\n"));
330 ret
= BCME_BUFTOOSHORT
;
337 /* service response filter */
338 if (svc_control
& NAN_SC_SR_FILTER_PRESENT
) {
339 WL_TRACE(("> svc_control: service response filter present\n"));
341 tlv_data
->rx_match_filter
.dlen
= *p_attr
++;
342 WL_TRACE(("> sr match filter len: 0x%02x\n",
343 tlv_data
->rx_match_filter
.dlen
));
345 if (!tlv_data
->rx_match_filter
.dlen
||
346 tlv_data
->rx_match_filter
.dlen
> MAX_MATCH_FILTER_LEN
) {
347 tlv_data
->rx_match_filter
.dlen
= 0;
348 WL_ERR(("%s: sr matching filter length is invalid\n",
353 tlv_data
->rx_match_filter
.data
=
354 MALLOCZ(osh
, tlv_data
->rx_match_filter
.dlen
);
355 if (!tlv_data
->rx_match_filter
.data
) {
356 WL_ERR(("%s: memory allocation failed\n", __FUNCTION__
));
357 tlv_data
->rx_match_filter
.dlen
= 0;
362 memcpy(tlv_data
->rx_match_filter
.data
, p_attr
,
363 tlv_data
->rx_match_filter
.dlen
);
365 /* advance read pointer */
366 offset
= tlv_data
->rx_match_filter
.dlen
;
368 WL_ERR(("Invalid event buffer len\n"));
369 ret
= BCME_BUFTOOSHORT
;
376 /* service specific info */
377 if (svc_control
& NAN_SC_SVC_INFO_PRESENT
) {
378 WL_TRACE(("> svc_control: svc specific info present\n"));
380 tlv_data
->svc_info
.dlen
= *p_attr
++;
381 WL_TRACE(("> svc info len: 0x%02x\n", tlv_data
->svc_info
.dlen
));
383 if (!tlv_data
->svc_info
.dlen
||
384 tlv_data
->svc_info
.dlen
> NAN_MAX_SERVICE_SPECIFIC_INFO_LEN
) {
385 /* must be able to handle null msg which is not error */
386 tlv_data
->svc_info
.dlen
= 0;
387 WL_ERR(("data length is invalid\n"));
392 if (tlv_data
->svc_info
.dlen
> 0) {
393 tlv_data
->svc_info
.data
=
394 MALLOCZ(osh
, tlv_data
->svc_info
.dlen
);
395 if (!tlv_data
->svc_info
.data
) {
396 WL_ERR(("%s: memory allocation failed\n", __FUNCTION__
));
397 tlv_data
->svc_info
.dlen
= 0;
401 memcpy(tlv_data
->svc_info
.data
, p_attr
, tlv_data
->svc_info
.dlen
);
403 /* advance read pointer */
404 offset
= tlv_data
->svc_info
.dlen
;
406 WL_ERR(("Invalid event buffer len\n"));
407 ret
= BCME_BUFTOOSHORT
;
413 /* must be able to handle null msg which is not error */
414 tlv_data
->svc_info
.dlen
= 0;
415 WL_TRACE(("%s: svc info length is zero, null info data\n",
421 * discovery range limited:
422 * If set to 1, the pub/sub msg is limited in range to close proximity.
423 * If set to 0, the pub/sub msg is not limited in range.
424 * Valid only when the message is either of a publish or a sub.
426 if (svc_control
& NAN_SC_RANGE_LIMITED
) {
427 if (((svc_control
& NAN_SVC_CONTROL_TYPE_MASK
) == NAN_SC_PUBLISH
) ||
428 ((svc_control
& NAN_SVC_CONTROL_TYPE_MASK
) == NAN_SC_SUBSCRIBE
)) {
429 WL_TRACE(("> svc_control: range limited present\n"));
431 WL_TRACE(("range limited is only valid on pub or sub\n"));
436 /* advance read pointer */
441 if (tlv_data
->tx_match_filter
.data
) {
442 MFREE(osh
, tlv_data
->tx_match_filter
.data
,
443 tlv_data
->tx_match_filter
.dlen
);
444 tlv_data
->tx_match_filter
.data
= NULL
;
446 if (tlv_data
->rx_match_filter
.data
) {
447 MFREE(osh
, tlv_data
->rx_match_filter
.data
,
448 tlv_data
->rx_match_filter
.dlen
);
449 tlv_data
->rx_match_filter
.data
= NULL
;
451 if (tlv_data
->svc_info
.data
) {
452 MFREE(osh
, tlv_data
->svc_info
.data
,
453 tlv_data
->svc_info
.dlen
);
454 tlv_data
->svc_info
.data
= NULL
;
457 WL_DBG(("Parse SDA event data, status = %d\n", ret
));
462 wl_cfgnan_parse_sd_attr_data(osl_t
*osh
, uint16 len
, const uint8
*data
,
463 nan_event_data_t
*tlv_data
, uint16 type
) {
464 const uint8
*p_attr
= data
;
467 const wl_nan_event_disc_result_t
*ev_disc
= NULL
;
468 const wl_nan_event_replied_t
*ev_replied
= NULL
;
469 const wl_nan_ev_receive_t
*ev_fup
= NULL
;
472 * Mapping wifi_nan_svc_descriptor_attr_t, and svc controls are optional.
474 if (type
== WL_NAN_XTLV_SD_DISC_RESULTS
) {
476 ev_disc
= (const wl_nan_event_disc_result_t
*)p_attr
;
478 WL_DBG((">> WL_NAN_XTLV_RESULTS: Discovery result\n"));
480 tlv_data
->pub_id
= (wl_nan_instance_id_t
)ev_disc
->pub_id
;
481 tlv_data
->sub_id
= (wl_nan_instance_id_t
)ev_disc
->sub_id
;
482 tlv_data
->publish_rssi
= ev_disc
->publish_rssi
;
483 memcpy(&tlv_data
->remote_nmi
, &ev_disc
->pub_mac
, ETHER_ADDR_LEN
);
485 WL_TRACE(("publish id: %d\n", ev_disc
->pub_id
));
486 WL_TRACE(("subscribe d: %d\n", ev_disc
->sub_id
));
487 WL_TRACE(("publish mac addr: " MACDBG
"\n",
488 MAC2STRDBG(ev_disc
->pub_mac
.octet
)));
489 WL_TRACE(("publish rssi: %d\n", (int8
)ev_disc
->publish_rssi
));
490 WL_TRACE(("attribute no: %d\n", ev_disc
->attr_num
));
491 WL_TRACE(("attribute len: %d\n", (uint16
)ev_disc
->attr_list_len
));
493 /* advance to the service descricptor */
494 offset
= OFFSETOF(wl_nan_event_disc_result_t
, attr_list
[0]);
496 WL_ERR(("Invalid event buffer len\n"));
497 ret
= BCME_BUFTOOSHORT
;
503 iter
= ev_disc
->attr_num
;
505 if ((uint8
)*p_attr
== NAN_ATTR_SVC_DESCRIPTOR
) {
506 WL_TRACE(("> attr id: 0x%02x\n", (uint8
)*p_attr
));
507 ret
= wl_cfgnan_parse_sda_data(osh
, p_attr
, len
, tlv_data
);
510 if ((uint8
)*p_attr
== NAN_ATTR_SVC_DESC_EXTENSION
) {
511 WL_TRACE(("> attr id: 0x%02x\n", (uint8
)*p_attr
));
512 ret
= wl_cfgnan_parse_sdea_data(osh
, p_attr
, len
, tlv_data
);
514 offset
= (sizeof(*p_attr
) +
515 sizeof(ev_disc
->attr_list_len
) +
516 (p_attr
[1] | (p_attr
[2] << 8)));
518 WL_ERR(("Invalid event buffer len\n"));
519 ret
= BCME_BUFTOOSHORT
;
526 } else if (type
== WL_NAN_XTLV_SD_FUP_RECEIVED
) {
528 ev_fup
= (const wl_nan_ev_receive_t
*)p_attr
;
530 WL_TRACE((">> WL_NAN_XTLV_SD_FUP_RECEIVED: Transmit follow-up\n"));
532 tlv_data
->local_inst_id
= (wl_nan_instance_id_t
)ev_fup
->local_id
;
533 tlv_data
->requestor_id
= (wl_nan_instance_id_t
)ev_fup
->remote_id
;
534 tlv_data
->fup_rssi
= ev_fup
->fup_rssi
;
535 memcpy(&tlv_data
->remote_nmi
, &ev_fup
->remote_addr
, ETHER_ADDR_LEN
);
537 WL_TRACE(("local id: %d\n", ev_fup
->local_id
));
538 WL_TRACE(("remote id: %d\n", ev_fup
->remote_id
));
539 WL_TRACE(("peer mac addr: " MACDBG
"\n",
540 MAC2STRDBG(ev_fup
->remote_addr
.octet
)));
541 WL_TRACE(("peer rssi: %d\n", (int8
)ev_fup
->fup_rssi
));
542 WL_TRACE(("attribute no: %d\n", ev_fup
->attr_num
));
543 WL_TRACE(("attribute len: %d\n", ev_fup
->attr_list_len
));
545 /* advance to the service descriptor which is attr_list[0] */
546 offset
= OFFSETOF(wl_nan_ev_receive_t
, attr_list
[0]);
548 WL_ERR(("Invalid event buffer len\n"));
549 ret
= BCME_BUFTOOSHORT
;
555 iter
= ev_fup
->attr_num
;
557 if ((uint8
)*p_attr
== NAN_ATTR_SVC_DESCRIPTOR
) {
558 WL_TRACE(("> attr id: 0x%02x\n", (uint8
)*p_attr
));
559 ret
= wl_cfgnan_parse_sda_data(osh
, p_attr
, len
, tlv_data
);
561 WL_ERR(("wl_cfgnan_parse_sda_data failed,"
562 "error = %d \n", ret
));
567 if ((uint8
)*p_attr
== NAN_ATTR_SVC_DESC_EXTENSION
) {
568 WL_TRACE(("> attr id: 0x%02x\n", (uint8
)*p_attr
));
569 ret
= wl_cfgnan_parse_sdea_data(osh
, p_attr
, len
, tlv_data
);
571 WL_ERR(("wl_cfgnan_parse_sdea_data failed,"
572 "error = %d \n", ret
));
576 offset
= (sizeof(*p_attr
) +
577 sizeof(ev_fup
->attr_list_len
) +
578 (p_attr
[1] | (p_attr
[2] << 8)));
580 WL_ERR(("Invalid event buffer len\n"));
581 ret
= BCME_BUFTOOSHORT
;
588 } else if (type
== WL_NAN_XTLV_SD_SDF_RX
) {
590 * SDF followed by nan2_pub_act_frame_t and wifi_nan_svc_descriptor_attr_t,
591 * and svc controls are optional.
593 const nan2_pub_act_frame_t
*nan_pub_af
=
594 (const nan2_pub_act_frame_t
*)p_attr
;
596 WL_TRACE((">> WL_NAN_XTLV_SD_SDF_RX\n"));
598 /* nan2_pub_act_frame_t */
599 WL_TRACE(("pub category: 0x%02x\n", nan_pub_af
->category_id
));
600 WL_TRACE(("pub action: 0x%02x\n", nan_pub_af
->action_field
));
601 WL_TRACE(("nan oui: %2x-%2x-%2x\n",
602 nan_pub_af
->oui
[0], nan_pub_af
->oui
[1], nan_pub_af
->oui
[2]));
603 WL_TRACE(("oui type: 0x%02x\n", nan_pub_af
->oui_type
));
604 WL_TRACE(("oui subtype: 0x%02x\n", nan_pub_af
->oui_sub_type
));
606 offset
= sizeof(*nan_pub_af
);
608 WL_ERR(("Invalid event buffer len\n"));
609 ret
= BCME_BUFTOOSHORT
;
614 } else if (type
== WL_NAN_XTLV_SD_REPLIED
) {
615 ev_replied
= (const wl_nan_event_replied_t
*)p_attr
;
617 WL_TRACE((">> WL_NAN_XTLV_SD_REPLIED: Replied Event\n"));
619 tlv_data
->pub_id
= (wl_nan_instance_id_t
)ev_replied
->pub_id
;
620 tlv_data
->sub_id
= (wl_nan_instance_id_t
)ev_replied
->sub_id
;
621 tlv_data
->sub_rssi
= ev_replied
->sub_rssi
;
622 memcpy(&tlv_data
->remote_nmi
, &ev_replied
->sub_mac
, ETHER_ADDR_LEN
);
624 WL_TRACE(("publish id: %d\n", ev_replied
->pub_id
));
625 WL_TRACE(("subscribe d: %d\n", ev_replied
->sub_id
));
626 WL_TRACE(("Subscriber mac addr: " MACDBG
"\n",
627 MAC2STRDBG(ev_replied
->sub_mac
.octet
)));
628 WL_TRACE(("subscribe rssi: %d\n", (int8
)ev_replied
->sub_rssi
));
629 WL_TRACE(("attribute no: %d\n", ev_replied
->attr_num
));
630 WL_TRACE(("attribute len: %d\n", (uint16
)ev_replied
->attr_list_len
));
632 /* advance to the service descriptor which is attr_list[0] */
633 offset
= OFFSETOF(wl_nan_event_replied_t
, attr_list
[0]);
635 WL_ERR(("Invalid event buffer len\n"));
636 ret
= BCME_BUFTOOSHORT
;
641 ret
= wl_cfgnan_parse_sda_data(osh
, p_attr
, len
, tlv_data
);
648 /* Based on each case of tlv type id, fill into tlv data */
650 wl_cfgnan_set_vars_cbfn(void *ctx
, const uint8
*data
, uint16 type
, uint16 len
)
652 nan_parse_event_ctx_t
*ctx_tlv_data
= ((nan_parse_event_ctx_t
*)(ctx
));
653 nan_event_data_t
*tlv_data
= ((nan_event_data_t
*)(ctx_tlv_data
->nan_evt_data
));
658 WL_ERR(("data length is invalid\n"));
665 * Need to parse service descript attributes including service control,
666 * when Follow up or Discovery result come
668 case WL_NAN_XTLV_SD_FUP_RECEIVED
:
669 case WL_NAN_XTLV_SD_DISC_RESULTS
: {
670 ret
= wl_cfgnan_parse_sd_attr_data(ctx_tlv_data
->cfg
->osh
,
671 len
, data
, tlv_data
, type
);
674 case WL_NAN_XTLV_SD_SVC_INFO
: {
675 tlv_data
->svc_info
.data
=
676 MALLOCZ(ctx_tlv_data
->cfg
->osh
, len
);
677 if (!tlv_data
->svc_info
.data
) {
678 WL_ERR(("%s: memory allocation failed\n", __FUNCTION__
));
679 tlv_data
->svc_info
.dlen
= 0;
683 tlv_data
->svc_info
.dlen
= len
;
684 memcpy(tlv_data
->svc_info
.data
, data
, tlv_data
->svc_info
.dlen
);
688 WL_ERR(("Not available for tlv type = 0x%x\n", type
));
/* Computes the 32-bit-aligned batch-subcommand length for a payload of
 * data_size bytes (subcommand header up to 'data' plus the payload rounded
 * up to 4 bytes) and verifies it fits within nan_iov_len.
 * NOTE(review): the tail of the parameter list, the return type and the
 * return paths (original lines 699-702, 709-716) are missing from this
 * extraction; subcmd_len is an out-parameter.
 */
698 wl_cfg_nan_check_cmd_len(uint16 nan_iov_len
, uint16 data_size
,
703 if (subcmd_len
!= NULL
) {
704 *subcmd_len
= OFFSETOF(bcm_iov_batch_subcmd_t
, data
) +
705 ALIGN_SIZE(data_size
, 4);
/* Requested subcommand would not fit in the remaining iov buffer. */
706 if (*subcmd_len
> nan_iov_len
) {
707 WL_ERR(("%s: Buf short, requested:%d, available:%d\n",
708 __FUNCTION__
, *subcmd_len
, nan_iov_len
));
712 WL_ERR(("Invalid subcmd_len\n"));
719 wl_cfgnan_config_eventmask(struct net_device
*ndev
, struct bcm_cfg80211
*cfg
,
720 uint8 event_ind_flag
, bool disable_events
)
722 bcm_iov_batch_buf_t
*nan_buf
= NULL
;
724 uint16 nan_buf_size
= NAN_IOCTL_BUF_SIZE
;
726 uint32 event_mask
= 0;
728 bcm_iov_batch_subcmd_t
*sub_cmd
= NULL
;
729 bcm_iov_batch_subcmd_t
*sub_cmd_resp
= NULL
;
730 uint8 resp_buf
[NAN_IOCTL_BUF_SIZE
];
732 dhd_pub_t
*dhdp
= wl_cfg80211_get_dhdp(ndev
);
736 ret
= wl_add_remove_eventmsg(ndev
, WLC_E_NAN
, true);
738 WL_ERR((" nan event enable failed, error = %d \n", ret
));
742 nan_buf
= MALLOCZ(dhdp
->osh
, nan_buf_size
);
744 WL_ERR(("%s: memory allocation failed\n", __func__
));
749 nan_buf
->version
= htol16(WL_NAN_IOV_BATCH_VERSION
);
751 nan_buf_size
-= OFFSETOF(bcm_iov_batch_buf_t
, cmds
[0]);
752 sub_cmd
= (bcm_iov_batch_subcmd_t
*)(uint8
*)(&nan_buf
->cmds
[0]);
754 ret
= wl_cfg_nan_check_cmd_len(nan_buf_size
,
755 sizeof(event_mask
), &subcmd_len
);
757 WL_ERR(("nan_sub_cmd check failed\n"));
761 sub_cmd
->id
= htod16(WL_NAN_CMD_CFG_EVENT_MASK
);
762 sub_cmd
->len
= sizeof(sub_cmd
->u
.options
) + sizeof(event_mask
);
763 sub_cmd
->u
.options
= htol32(BCM_XTLV_OPTION_ALIGN32
);
764 nan_buf_size
-= subcmd_len
;
767 if (disable_events
) {
768 WL_DBG(("Disabling all nan events..except stop event\n"));
769 event_mask
= NAN_EVENT_BIT(WL_NAN_EVENT_STOP
);
772 * Android framework event mask configuration.
774 if (event_ind_flag
) {
775 nan_buf
->is_set
= false;
776 memset(resp_buf
, 0, sizeof(resp_buf
));
777 ret
= wl_cfgnan_execute_ioctl(ndev
, cfg
, nan_buf
, nan_buf_size
, &status
,
778 (void*)resp_buf
, NAN_IOCTL_BUF_SIZE
);
779 if (unlikely(ret
) || unlikely(status
)) {
780 WL_ERR(("get nan event mask failed ret %d status %d \n",
784 sub_cmd_resp
= &((bcm_iov_batch_buf_t
*)(resp_buf
))->cmds
[0];
786 /* check the response buff */
787 event_mask
= (*(uint8
*)&sub_cmd_resp
->data
[0]);
789 if (CHECK_BIT(event_ind_flag
, WL_NAN_EVENT_DIC_MAC_ADDR_BIT
)) {
790 WL_DBG(("Need to add disc mac addr change event\n"));
792 /* BIT2 - Disable nan cluster join indication (OTA). */
793 if (CHECK_BIT(event_ind_flag
, WL_NAN_EVENT_JOIN_EVENT
)) {
794 event_mask
&= ~NAN_EVENT_BIT(WL_NAN_EVENT_MERGE
);
797 /* enable only selected nan events to avoid unnecessary host wake up */
798 event_mask
|= NAN_EVENT_BIT(WL_NAN_EVENT_START
);
799 event_mask
|= NAN_EVENT_BIT(WL_NAN_EVENT_MERGE
);
802 event_mask
|= NAN_EVENT_BIT(WL_NAN_EVENT_DISCOVERY_RESULT
);
803 event_mask
|= NAN_EVENT_BIT(WL_NAN_EVENT_RECEIVE
);
804 event_mask
|= NAN_EVENT_BIT(WL_NAN_EVENT_TERMINATED
);
805 event_mask
|= NAN_EVENT_BIT(WL_NAN_EVENT_STOP
);
806 event_mask
|= NAN_EVENT_BIT(WL_NAN_EVENT_TXS
);
807 event_mask
|= NAN_EVENT_BIT(WL_NAN_EVENT_PEER_DATAPATH_IND
);
808 event_mask
|= NAN_EVENT_BIT(WL_NAN_EVENT_DATAPATH_ESTB
);
809 event_mask
|= NAN_EVENT_BIT(WL_NAN_EVENT_DATAPATH_END
);
810 event_mask
|= NAN_EVENT_BIT(WL_NAN_EVENT_RNG_RPT_IND
);
811 event_mask
|= NAN_EVENT_BIT(WL_NAN_EVENT_RNG_REQ_IND
);
812 event_mask
|= NAN_EVENT_BIT(WL_NAN_EVENT_RNG_TERM_IND
);
815 nan_buf
->is_set
= true;
816 memcpy(sub_cmd
->data
, &event_mask
, sizeof(event_mask
));
817 nan_buf_size
= (NAN_IOCTL_BUF_SIZE
- nan_buf_size
);
818 ret
= wl_cfgnan_execute_ioctl(ndev
, cfg
, nan_buf
, nan_buf_size
, &status
,
819 (void*)resp_buf
, NAN_IOCTL_BUF_SIZE
);
820 if (unlikely(ret
) || unlikely(status
)) {
821 WL_ERR(("set nan event mask failed ret %d status %d \n", ret
, status
));
824 WL_DBG(("set nan event mask successfull\n"));
828 MFREE(dhdp
->osh
, nan_buf
, NAN_IOCTL_BUF_SIZE
);
835 wl_cfgnan_set_nan_avail(struct net_device
*ndev
,
836 struct bcm_cfg80211
*cfg
, nan_avail_cmd_data
*cmd_data
, uint8 avail_type
)
838 bcm_iov_batch_buf_t
*nan_buf
= NULL
;
840 uint16 nan_buf_size
= NAN_IOCTL_BUF_SIZE
;
842 bcm_iov_batch_subcmd_t
*sub_cmd
= NULL
;
843 wl_nan_iov_t
*nan_iov_data
= NULL
;
844 wl_avail_t
*avail
= NULL
;
845 wl_avail_entry_t
*entry
; /* used for filling entry structure */
846 uint8
*p
; /* tracking pointer */
850 char ndc_id
[ETHER_ADDR_LEN
] = { 0x50, 0x6f, 0x9a, 0x01, 0x0, 0x0 };
851 dhd_pub_t
*dhdp
= wl_cfg80211_get_dhdp(ndev
);
852 char *a
= WL_AVAIL_BIT_MAP
;
853 uint8 resp_buf
[NAN_IOCTL_BUF_SIZE
];
857 /* Do not disturb avail if dam is supported */
858 if (FW_SUPPORTED(dhdp
, autodam
)) {
859 WL_DBG(("DAM is supported, avail modification not allowed\n"));
863 if (avail_type
< WL_AVAIL_LOCAL
|| avail_type
> WL_AVAIL_TYPE_MAX
) {
864 WL_ERR(("Invalid availability type\n"));
865 ret
= BCME_USAGE_ERROR
;
869 nan_buf
= MALLOCZ(dhdp
->osh
, nan_buf_size
);
871 WL_ERR(("%s: memory allocation failed\n", __func__
));
876 nan_iov_data
= MALLOCZ(dhdp
->osh
, sizeof(*nan_iov_data
));
878 WL_ERR(("%s: memory allocation failed\n", __func__
));
883 nan_iov_data
->nan_iov_len
= NAN_IOCTL_BUF_SIZE
;
884 nan_buf
->version
= htol16(WL_NAN_IOV_BATCH_VERSION
);
886 nan_iov_data
->nan_iov_buf
= (uint8
*)(&nan_buf
->cmds
[0]);
887 nan_iov_data
->nan_iov_len
-= OFFSETOF(bcm_iov_batch_buf_t
, cmds
[0]);
889 sub_cmd
= (bcm_iov_batch_subcmd_t
*)(nan_iov_data
->nan_iov_buf
);
890 ret
= wl_cfg_nan_check_cmd_len(nan_iov_data
->nan_iov_len
,
891 sizeof(*avail
), &subcmd_len
);
893 WL_ERR(("nan_sub_cmd check failed\n"));
896 avail
= (wl_avail_t
*)sub_cmd
->data
;
898 /* populate wl_avail_type */
899 avail
->flags
= avail_type
;
900 if (avail_type
== WL_AVAIL_RANGING
) {
901 memcpy(&avail
->addr
, &cmd_data
->peer_nmi
, ETHER_ADDR_LEN
);
904 sub_cmd
->len
= sizeof(sub_cmd
->u
.options
) + subcmd_len
;
905 sub_cmd
->id
= htod16(WL_NAN_CMD_CFG_AVAIL
);
906 sub_cmd
->u
.options
= htol32(BCM_XTLV_OPTION_ALIGN32
);
908 nan_buf
->is_set
= false;
910 nan_iov_data
->nan_iov_len
-= subcmd_len
;
911 nan_buf_size
= (NAN_IOCTL_BUF_SIZE
- nan_iov_data
->nan_iov_len
);
913 WL_TRACE(("Read wl nan avail status\n"));
914 memset(resp_buf
, 0, sizeof(resp_buf
));
915 ret
= wl_cfgnan_execute_ioctl(ndev
, cfg
, nan_buf
, nan_buf_size
, &status
,
916 (void*)resp_buf
, NAN_IOCTL_BUF_SIZE
);
918 WL_ERR(("\n Get nan avail failed ret %d, status %d \n", ret
, status
));
922 if (status
== BCME_NOTFOUND
) {
924 nan_iov_data
->nan_iov_buf
= (uint8
*)(&nan_buf
->cmds
[0]);
925 nan_iov_data
->nan_iov_len
-= OFFSETOF(bcm_iov_batch_buf_t
, cmds
[0]);
927 sub_cmd
= (bcm_iov_batch_subcmd_t
*)(nan_iov_data
->nan_iov_buf
);
929 avail
= (wl_avail_t
*)sub_cmd
->data
;
932 /* populate wl_avail fields */
933 avail
->length
= OFFSETOF(wl_avail_t
, entry
);
934 avail
->flags
= avail_type
;
935 avail
->num_entries
= 0;
937 entry
= (wl_avail_entry_t
*)p
;
938 entry
->flags
= WL_AVAIL_ENTRY_COM
;
940 /* set default values for optional parameters */
941 entry
->start_offset
= 0;
944 if (cmd_data
->avail_period
) {
945 entry
->period
= cmd_data
->avail_period
;
947 entry
->period
= WL_AVAIL_PERIOD_1024
;
950 if (cmd_data
->duration
!= NAN_BAND_INVALID
) {
951 entry
->flags
|= (3 << WL_AVAIL_ENTRY_USAGE_SHIFT
) |
952 (cmd_data
->duration
<< WL_AVAIL_ENTRY_BIT_DUR_SHIFT
);
954 entry
->flags
|= (3 << WL_AVAIL_ENTRY_USAGE_SHIFT
) |
955 (WL_AVAIL_BIT_DUR_16
<< WL_AVAIL_ENTRY_BIT_DUR_SHIFT
);
957 entry
->bitmap_len
= 0;
959 if (avail_type
== WL_AVAIL_LOCAL
) {
960 entry
->flags
|= 1 << WL_AVAIL_ENTRY_CHAN_SHIFT
;
961 /* Check for 5g support, based on that choose 5g channel */
962 if (cfg
->support_5g
) {
963 entry
->u
.channel_info
=
964 htod32(wf_channel2chspec(WL_AVAIL_CHANNEL_5G
,
965 WL_AVAIL_BANDWIDTH_5G
));
967 entry
->u
.channel_info
=
968 htod32(wf_channel2chspec(WL_AVAIL_CHANNEL_2G
,
969 WL_AVAIL_BANDWIDTH_2G
));
971 entry
->flags
= htod16(entry
->flags
);
974 if (cfg
->support_5g
) {
975 a
= WL_5G_AVAIL_BIT_MAP
;
978 /* point to bitmap value for processing */
979 if (cmd_data
->bmap
) {
980 for (c
= (WL_NAN_EVENT_CLEAR_BIT
-1); c
>= 0; c
--) {
981 i
= cmd_data
->bmap
>> c
;
983 setbit(entry
->bitmap
, (WL_NAN_EVENT_CLEAR_BIT
-c
-1));
987 for (i
= 0; i
< strlen(WL_AVAIL_BIT_MAP
); i
++) {
989 setbit(entry
->bitmap
, i
);
995 /* account for partially filled most significant byte */
996 entry
->bitmap_len
= ((WL_NAN_EVENT_CLEAR_BIT
) + NBBY
- 1) / NBBY
;
997 if (avail_type
== WL_AVAIL_NDC
) {
998 memcpy(&avail
->addr
, ndc_id
, ETHER_ADDR_LEN
);
999 } else if (avail_type
== WL_AVAIL_RANGING
) {
1000 memcpy(&avail
->addr
, &cmd_data
->peer_nmi
, ETHER_ADDR_LEN
);
1002 /* account for partially filled most significant byte */
1004 /* update wl_avail and populate wl_avail_entry */
1005 entry
->length
= OFFSETOF(wl_avail_entry_t
, bitmap
) + entry
->bitmap_len
;
1006 avail
->num_entries
++;
1007 avail
->length
+= entry
->length
;
1008 /* advance pointer for next entry */
1011 /* convert to dongle endianness */
1012 entry
->length
= htod16(entry
->length
);
1013 entry
->start_offset
= htod16(entry
->start_offset
);
1014 entry
->u
.channel_info
= htod32(entry
->u
.channel_info
);
1015 entry
->flags
= htod16(entry
->flags
);
1016 /* update avail_len only if
1017 * there are avail entries
1019 if (avail
->num_entries
) {
1020 nan_iov_data
->nan_iov_len
-= avail
->length
;
1021 avail
->length
= htod16(avail
->length
);
1022 avail
->flags
= htod16(avail
->flags
);
1024 avail
->length
= htod16(avail
->length
);
1026 sub_cmd
->id
= htod16(WL_NAN_CMD_CFG_AVAIL
);
1027 sub_cmd
->len
= sizeof(sub_cmd
->u
.options
) + avail
->length
;
1028 sub_cmd
->u
.options
= htol32(BCM_XTLV_OPTION_ALIGN32
);
1030 nan_buf
->is_set
= true;
1033 /* Reduce the iov_len size by subcmd_len */
1034 nan_iov_data
->nan_iov_len
-= subcmd_len
;
1035 nan_buf_size
= (NAN_IOCTL_BUF_SIZE
- nan_iov_data
->nan_iov_len
);
1037 ret
= wl_cfgnan_execute_ioctl(ndev
, cfg
, nan_buf
, nan_buf_size
, &status
,
1038 (void*)resp_buf
, NAN_IOCTL_BUF_SIZE
);
1039 if (unlikely(ret
) || unlikely(status
)) {
1040 WL_ERR(("\n set nan avail failed ret %d status %d \n", ret
, status
));
1044 } else if (status
== BCME_OK
) {
1045 WL_DBG(("Avail type [%d] found to be configured\n", avail_type
));
1047 WL_ERR(("set nan avail failed ret %d status %d \n", ret
, status
));
1052 MFREE(dhdp
->osh
, nan_buf
, NAN_IOCTL_BUF_SIZE
);
1055 MFREE(dhdp
->osh
, nan_iov_data
, sizeof(*nan_iov_data
));
1063 wl_cfgnan_config_control_flag(struct net_device
*ndev
, struct bcm_cfg80211
*cfg
,
1064 uint32 flag
, uint32
*status
, bool set
)
1066 bcm_iov_batch_buf_t
*nan_buf
= NULL
;
1068 uint16 nan_iov_start
, nan_iov_end
;
1069 uint16 nan_buf_size
= NAN_IOCTL_BUF_SIZE
;
1071 bcm_iov_batch_subcmd_t
*sub_cmd
= NULL
;
1072 bcm_iov_batch_subcmd_t
*sub_cmd_resp
= NULL
;
1073 wl_nan_iov_t
*nan_iov_data
= NULL
;
1075 uint8 resp_buf
[NAN_IOCTL_BUF_SIZE
];
1077 dhd_pub_t
*dhdp
= wl_cfg80211_get_dhdp(ndev
);
1080 WL_INFORM_MEM(("%s: Modifying nan ctrl flag %x val %d",
1081 __FUNCTION__
, flag
, set
));
1082 nan_buf
= MALLOCZ(dhdp
->osh
, nan_buf_size
);
1084 WL_ERR(("%s: memory allocation failed\n", __func__
));
1089 nan_iov_data
= MALLOCZ(dhdp
->osh
, sizeof(*nan_iov_data
));
1090 if (!nan_iov_data
) {
1091 WL_ERR(("%s: memory allocation failed\n", __func__
));
1096 nan_iov_data
->nan_iov_len
= nan_iov_start
= NAN_IOCTL_BUF_SIZE
;
1097 nan_buf
->version
= htol16(WL_NAN_IOV_BATCH_VERSION
);
1099 nan_iov_data
->nan_iov_buf
= (uint8
*)(&nan_buf
->cmds
[0]);
1100 nan_iov_data
->nan_iov_len
-= OFFSETOF(bcm_iov_batch_buf_t
, cmds
[0]);
1101 sub_cmd
= (bcm_iov_batch_subcmd_t
*)(nan_iov_data
->nan_iov_buf
);
1103 ret
= wl_cfg_nan_check_cmd_len(nan_iov_data
->nan_iov_len
,
1104 sizeof(cfg_ctrl
), &subcmd_len
);
1105 if (unlikely(ret
)) {
1106 WL_ERR(("nan_sub_cmd check failed\n"));
1110 sub_cmd
->id
= htod16(WL_NAN_CMD_CFG_NAN_CONFIG
);
1111 sub_cmd
->len
= sizeof(sub_cmd
->u
.options
) + sizeof(cfg_ctrl
);
1112 sub_cmd
->u
.options
= htol32(BCM_XTLV_OPTION_ALIGN32
);
1114 nan_buf
->is_set
= false;
1117 /* Reduce the iov_len size by subcmd_len */
1118 nan_iov_data
->nan_iov_len
-= subcmd_len
;
1119 nan_iov_end
= nan_iov_data
->nan_iov_len
;
1120 nan_buf_size
= (nan_iov_start
- nan_iov_end
);
1122 memset(resp_buf
, 0, sizeof(resp_buf
));
1123 ret
= wl_cfgnan_execute_ioctl(ndev
, cfg
, nan_buf
, nan_buf_size
, status
,
1124 (void*)resp_buf
, NAN_IOCTL_BUF_SIZE
);
1125 if (unlikely(ret
) || unlikely(*status
)) {
1126 WL_ERR(("get nan cfg ctrl failed ret %d status %d \n", ret
, *status
));
1129 sub_cmd_resp
= &((bcm_iov_batch_buf_t
*)(resp_buf
))->cmds
[0];
1131 /* check the response buff */
1132 cfg_ctrl
= (*(uint32
*)&sub_cmd_resp
->data
[0]);
1138 memcpy(sub_cmd
->data
, &cfg_ctrl
, sizeof(cfg_ctrl
));
1139 nan_buf
->is_set
= true;
1140 ret
= wl_cfgnan_execute_ioctl(ndev
, cfg
, nan_buf
, nan_buf_size
, status
,
1141 (void*)resp_buf
, NAN_IOCTL_BUF_SIZE
);
1142 if (unlikely(ret
) || unlikely(*status
)) {
1143 WL_ERR(("set nan cfg ctrl failed ret %d status %d \n", ret
, *status
));
1146 WL_DBG(("set nan cfg ctrl successfull\n"));
1149 MFREE(dhdp
->osh
, nan_buf
, NAN_IOCTL_BUF_SIZE
);
1152 MFREE(dhdp
->osh
, nan_iov_data
, sizeof(*nan_iov_data
));
1160 wl_cfgnan_get_iovars_status(void *ctx
, const uint8
*data
, uint16 type
, uint16 len
)
1162 bcm_iov_batch_buf_t
*b_resp
= (bcm_iov_batch_buf_t
*)ctx
;
1164 /* if all tlvs are parsed, we should not be here */
1165 if (b_resp
->count
== 0) {
1169 /* cbfn params may be used in f/w */
1170 if (len
< sizeof(status
)) {
1171 return BCME_BUFTOOSHORT
;
1174 /* first 4 bytes consists status */
1175 memcpy(&status
, data
, sizeof(uint32
));
1176 status
= dtoh32(status
);
1178 /* If status is non zero */
1179 if (status
!= BCME_OK
) {
1180 printf("cmd type %d failed, status: %04x\n", type
, status
);
1184 if (b_resp
->count
> 0) {
1188 if (!b_resp
->count
) {
1189 status
= BCME_IOV_LAST_CMD
;
1196 wl_cfgnan_execute_ioctl(struct net_device
*ndev
, struct bcm_cfg80211
*cfg
,
1197 bcm_iov_batch_buf_t
*nan_buf
, uint16 nan_buf_size
, uint32
*status
,
1198 uint8
*resp_buf
, uint16 resp_buf_size
)
1203 bcm_iov_batch_buf_t
*p_resp
= NULL
;
1205 int max_resp_len
= WLC_IOCTL_MAXLEN
;
1207 WL_DBG(("Enter:\n"));
1208 if (nan_buf
->is_set
) {
1209 ret
= wldev_iovar_setbuf(ndev
, "nan", nan_buf
, nan_buf_size
,
1210 resp_buf
, resp_buf_size
, NULL
);
1211 p_resp
= (bcm_iov_batch_buf_t
*)(resp_buf
+ strlen(iov
) + 1);
1213 ret
= wldev_iovar_getbuf(ndev
, "nan", nan_buf
, nan_buf_size
,
1214 resp_buf
, resp_buf_size
, NULL
);
1215 p_resp
= (bcm_iov_batch_buf_t
*)(resp_buf
);
1217 if (unlikely(ret
)) {
1218 WL_ERR((" nan execute ioctl failed, error = %d \n", ret
));
1222 p_resp
->is_set
= nan_buf
->is_set
;
1223 tlvs_len
= max_resp_len
- OFFSETOF(bcm_iov_batch_buf_t
, cmds
[0]);
1225 /* Extract the tlvs and print their resp in cb fn */
1226 res
= bcm_unpack_xtlv_buf((void *)p_resp
, (const uint8
*)&p_resp
->cmds
[0],
1227 tlvs_len
, BCM_IOV_CMD_OPT_ALIGN32
, wl_cfgnan_get_iovars_status
);
1229 if (res
== BCME_IOV_LAST_CMD
) {
1234 WL_DBG((" nan ioctl ret %d status %d \n", ret
, *status
));
1240 wl_cfgnan_if_addr_handler(void *p_buf
, uint16
*nan_buf_size
,
1241 struct ether_addr
*if_addr
)
1249 if (p_buf
!= NULL
) {
1250 bcm_iov_batch_subcmd_t
*sub_cmd
= (bcm_iov_batch_subcmd_t
*)(p_buf
);
1252 ret
= wl_cfg_nan_check_cmd_len(*nan_buf_size
,
1253 sizeof(*if_addr
), &subcmd_len
);
1254 if (unlikely(ret
)) {
1255 WL_ERR(("nan_sub_cmd check failed\n"));
1259 /* Fill the sub_command block */
1260 sub_cmd
->id
= htod16(WL_NAN_CMD_CFG_IF_ADDR
);
1261 sub_cmd
->len
= sizeof(sub_cmd
->u
.options
) + sizeof(*if_addr
);
1262 sub_cmd
->u
.options
= htol32(BCM_XTLV_OPTION_ALIGN32
);
1263 memcpy(sub_cmd
->data
, (uint8
*)if_addr
,
1266 *nan_buf_size
-= subcmd_len
;
1268 WL_ERR(("nan_iov_buf is NULL\n"));
1279 wl_cfgnan_set_if_addr(struct bcm_cfg80211
*cfg
)
1282 uint16 nan_buf_size
= NAN_IOCTL_BUF_SIZE
;
1284 uint8 resp_buf
[NAN_IOCTL_BUF_SIZE
];
1285 struct ether_addr if_addr
;
1286 uint8 buf
[NAN_IOCTL_BUF_SIZE
];
1287 bcm_iov_batch_buf_t
*nan_buf
= (bcm_iov_batch_buf_t
*)buf
;
1288 bool rand_mac
= cfg
->nancfg
.mac_rand
;
1290 nan_buf
->version
= htol16(WL_NAN_IOV_BATCH_VERSION
);
1292 nan_buf_size
-= OFFSETOF(bcm_iov_batch_buf_t
, cmds
[0]);
1294 RANDOM_BYTES(if_addr
.octet
, 6);
1295 /* restore mcast and local admin bits to 0 and 1 */
1296 ETHER_SET_UNICAST(if_addr
.octet
);
1297 ETHER_SET_LOCALADDR(if_addr
.octet
);
1299 /* Use primary MAC with the locally administered bit for the
1302 if (wl_get_vif_macaddr(cfg
, WL_IF_TYPE_NAN_NMI
,
1303 if_addr
.octet
) != BCME_OK
) {
1308 WL_INFORM_MEM(("%s: NMI " MACDBG
"\n",
1309 __FUNCTION__
, MAC2STRDBG(if_addr
.octet
)));
1310 ret
= wl_cfgnan_if_addr_handler(&nan_buf
->cmds
[0],
1311 &nan_buf_size
, &if_addr
);
1312 if (unlikely(ret
)) {
1313 WL_ERR(("Nan if addr handler sub_cmd set failed\n"));
1317 nan_buf
->is_set
= true;
1318 nan_buf_size
= NAN_IOCTL_BUF_SIZE
- nan_buf_size
;
1319 memset(resp_buf
, 0, sizeof(resp_buf
));
1320 ret
= wl_cfgnan_execute_ioctl(bcmcfg_to_prmry_ndev(cfg
), cfg
,
1321 nan_buf
, nan_buf_size
, &status
,
1322 (void*)resp_buf
, NAN_IOCTL_BUF_SIZE
);
1323 if (unlikely(ret
) || unlikely(status
)) {
1324 WL_ERR(("nan if addr handler failed ret %d status %d\n",
1328 memcpy(cfg
->nan_nmi_mac
, if_addr
.octet
, ETH_ALEN
);
1332 wl_release_vif_macaddr(cfg
, if_addr
.octet
, WL_IF_TYPE_NAN_NMI
);
1339 wl_cfgnan_init_handler(void *p_buf
, uint16
*nan_buf_size
, bool val
)
1347 if (p_buf
!= NULL
) {
1348 bcm_iov_batch_subcmd_t
*sub_cmd
= (bcm_iov_batch_subcmd_t
*)(p_buf
);
1350 ret
= wl_cfg_nan_check_cmd_len(*nan_buf_size
,
1351 sizeof(val
), &subcmd_len
);
1352 if (unlikely(ret
)) {
1353 WL_ERR(("nan_sub_cmd check failed\n"));
1357 /* Fill the sub_command block */
1358 sub_cmd
->id
= htod16(WL_NAN_CMD_CFG_NAN_INIT
);
1359 sub_cmd
->len
= sizeof(sub_cmd
->u
.options
) + sizeof(uint8
);
1360 sub_cmd
->u
.options
= htol32(BCM_XTLV_OPTION_ALIGN32
);
1361 memcpy(sub_cmd
->data
, (uint8
*)&val
, sizeof(uint8
));
1363 *nan_buf_size
-= subcmd_len
;
1365 WL_ERR(("nan_iov_buf is NULL\n"));
1376 wl_cfgnan_enable_handler(wl_nan_iov_t
*nan_iov_data
, bool val
)
1380 bcm_iov_batch_subcmd_t
*sub_cmd
= NULL
;
1385 sub_cmd
= (bcm_iov_batch_subcmd_t
*)(nan_iov_data
->nan_iov_buf
);
1387 ret
= wl_cfg_nan_check_cmd_len(nan_iov_data
->nan_iov_len
,
1388 sizeof(val
), &subcmd_len
);
1389 if (unlikely(ret
)) {
1390 WL_ERR(("nan_sub_cmd check failed\n"));
1394 /* Fill the sub_command block */
1395 sub_cmd
->id
= htod16(WL_NAN_CMD_CFG_NAN_ENAB
);
1396 sub_cmd
->len
= sizeof(sub_cmd
->u
.options
) + sizeof(uint8
);
1397 sub_cmd
->u
.options
= htol32(BCM_XTLV_OPTION_ALIGN32
);
1398 memcpy(sub_cmd
->data
, (uint8
*)&val
, sizeof(uint8
));
1400 nan_iov_data
->nan_iov_len
-= subcmd_len
;
1401 nan_iov_data
->nan_iov_buf
+= subcmd_len
;
1407 wl_cfgnan_warmup_time_handler(nan_config_cmd_data_t
*cmd_data
,
1408 wl_nan_iov_t
*nan_iov_data
)
1410 /* wl nan warm_up_time */
1412 bcm_iov_batch_subcmd_t
*sub_cmd
= NULL
;
1413 wl_nan_warmup_time_ticks_t
*wup_ticks
= NULL
;
1417 sub_cmd
= (bcm_iov_batch_subcmd_t
*)(nan_iov_data
->nan_iov_buf
);
1418 wup_ticks
= (wl_nan_warmup_time_ticks_t
*)sub_cmd
->data
;
1420 ret
= wl_cfg_nan_check_cmd_len(nan_iov_data
->nan_iov_len
,
1421 sizeof(*wup_ticks
), &subcmd_len
);
1422 if (unlikely(ret
)) {
1423 WL_ERR(("nan_sub_cmd check failed\n"));
1426 /* Fill the sub_command block */
1427 sub_cmd
->id
= htod16(WL_NAN_CMD_CFG_WARMUP_TIME
);
1428 sub_cmd
->len
= sizeof(sub_cmd
->u
.options
) +
1430 sub_cmd
->u
.options
= htol32(BCM_XTLV_OPTION_ALIGN32
);
1431 *wup_ticks
= cmd_data
->warmup_time
;
1433 nan_iov_data
->nan_iov_len
-= subcmd_len
;
1434 nan_iov_data
->nan_iov_buf
+= subcmd_len
;
1441 wl_cfgnan_set_election_metric(nan_config_cmd_data_t
*cmd_data
,
1442 wl_nan_iov_t
*nan_iov_data
, uint32 nan_attr_mask
)
1445 bcm_iov_batch_subcmd_t
*sub_cmd
= NULL
;
1446 wl_nan_election_metric_config_t
*metrics
= NULL
;
1451 (bcm_iov_batch_subcmd_t
*)(nan_iov_data
->nan_iov_buf
);
1452 ret
= wl_cfg_nan_check_cmd_len(nan_iov_data
->nan_iov_len
,
1453 sizeof(*metrics
), &subcmd_len
);
1454 if (unlikely(ret
)) {
1455 WL_ERR(("nan_sub_cmd check failed\n"));
1459 metrics
= (wl_nan_election_metric_config_t
*)sub_cmd
->data
;
1461 if (nan_attr_mask
& NAN_ATTR_RAND_FACTOR_CONFIG
) {
1462 metrics
->random_factor
= (uint8
)cmd_data
->metrics
.random_factor
;
1465 if ((!cmd_data
->metrics
.master_pref
) ||
1466 (cmd_data
->metrics
.master_pref
> NAN_MAXIMUM_MASTER_PREFERENCE
)) {
1467 WL_TRACE(("Master Pref is 0 or greater than 254, hence sending random value\n"));
1468 /* Master pref for mobile devices can be from 1 - 127 as per Spec AppendixC */
1469 metrics
->master_pref
= (RANDOM32()%(NAN_MAXIMUM_MASTER_PREFERENCE
/2)) + 1;
1471 metrics
->master_pref
= (uint8
)cmd_data
->metrics
.master_pref
;
1473 sub_cmd
->id
= htod16(WL_NAN_CMD_ELECTION_METRICS_CONFIG
);
1474 sub_cmd
->len
= sizeof(sub_cmd
->u
.options
) +
1476 sub_cmd
->u
.options
= htol32(BCM_XTLV_OPTION_ALIGN32
);
1478 nan_iov_data
->nan_iov_len
-= subcmd_len
;
1479 nan_iov_data
->nan_iov_buf
+= subcmd_len
;
1487 wl_cfgnan_set_rssi_proximity(nan_config_cmd_data_t
*cmd_data
,
1488 wl_nan_iov_t
*nan_iov_data
, uint32 nan_attr_mask
)
1491 bcm_iov_batch_subcmd_t
*sub_cmd
= NULL
;
1492 wl_nan_rssi_notif_thld_t
*rssi_notif_thld
= NULL
;
1496 sub_cmd
= (bcm_iov_batch_subcmd_t
*)(nan_iov_data
->nan_iov_buf
);
1498 rssi_notif_thld
= (wl_nan_rssi_notif_thld_t
*)sub_cmd
->data
;
1500 ret
= wl_cfg_nan_check_cmd_len(nan_iov_data
->nan_iov_len
,
1501 sizeof(*rssi_notif_thld
), &subcmd_len
);
1502 if (unlikely(ret
)) {
1503 WL_ERR(("nan_sub_cmd check failed\n"));
1506 if (nan_attr_mask
& NAN_ATTR_RSSI_PROXIMITY_2G_CONFIG
) {
1507 rssi_notif_thld
->bcn_rssi_2g
=
1508 cmd_data
->rssi_attr
.rssi_proximity_2dot4g_val
;
1510 /* Keeping RSSI threshold value to be -70dBm */
1511 rssi_notif_thld
->bcn_rssi_2g
= NAN_DEF_RSSI_NOTIF_THRESH
;
1514 if (nan_attr_mask
& NAN_ATTR_RSSI_PROXIMITY_5G_CONFIG
) {
1515 rssi_notif_thld
->bcn_rssi_5g
=
1516 cmd_data
->rssi_attr
.rssi_proximity_5g_val
;
1518 /* Keeping RSSI threshold value to be -70dBm */
1519 rssi_notif_thld
->bcn_rssi_5g
= NAN_DEF_RSSI_NOTIF_THRESH
;
1522 sub_cmd
->id
= htod16(WL_NAN_CMD_SYNC_BCN_RSSI_NOTIF_THRESHOLD
);
1523 sub_cmd
->len
= htod16(sizeof(sub_cmd
->u
.options
) + sizeof(*rssi_notif_thld
));
1524 sub_cmd
->u
.options
= htod32(BCM_XTLV_OPTION_ALIGN32
);
1526 nan_iov_data
->nan_iov_len
-= subcmd_len
;
1527 nan_iov_data
->nan_iov_buf
+= subcmd_len
;
1534 wl_cfgnan_set_rssi_mid_or_close(nan_config_cmd_data_t
*cmd_data
,
1535 wl_nan_iov_t
*nan_iov_data
, uint32 nan_attr_mask
)
1538 bcm_iov_batch_subcmd_t
*sub_cmd
= NULL
;
1539 wl_nan_rssi_thld_t
*rssi_thld
= NULL
;
1543 sub_cmd
= (bcm_iov_batch_subcmd_t
*)(nan_iov_data
->nan_iov_buf
);
1544 rssi_thld
= (wl_nan_rssi_thld_t
*)sub_cmd
->data
;
1546 ret
= wl_cfg_nan_check_cmd_len(nan_iov_data
->nan_iov_len
,
1547 sizeof(*rssi_thld
), &subcmd_len
);
1548 if (unlikely(ret
)) {
1549 WL_ERR(("nan_sub_cmd check failed\n"));
1554 * Keeping RSSI mid value -75dBm for both 2G and 5G
1555 * Keeping RSSI close value -60dBm for both 2G and 5G
1557 if (nan_attr_mask
& NAN_ATTR_RSSI_MIDDLE_2G_CONFIG
) {
1558 rssi_thld
->rssi_mid_2g
=
1559 cmd_data
->rssi_attr
.rssi_middle_2dot4g_val
;
1561 rssi_thld
->rssi_mid_2g
= NAN_DEF_RSSI_MID
;
1564 if (nan_attr_mask
& NAN_ATTR_RSSI_MIDDLE_5G_CONFIG
) {
1565 rssi_thld
->rssi_mid_5g
=
1566 cmd_data
->rssi_attr
.rssi_middle_5g_val
;
1568 rssi_thld
->rssi_mid_5g
= NAN_DEF_RSSI_MID
;
1571 if (nan_attr_mask
& NAN_ATTR_RSSI_CLOSE_CONFIG
) {
1572 rssi_thld
->rssi_close_2g
=
1573 cmd_data
->rssi_attr
.rssi_close_2dot4g_val
;
1575 rssi_thld
->rssi_close_2g
= NAN_DEF_RSSI_CLOSE
;
1578 if (nan_attr_mask
& NAN_ATTR_RSSI_CLOSE_5G_CONFIG
) {
1579 rssi_thld
->rssi_close_5g
=
1580 cmd_data
->rssi_attr
.rssi_close_5g_val
;
1582 rssi_thld
->rssi_close_5g
= NAN_DEF_RSSI_CLOSE
;
1585 sub_cmd
->id
= htod16(WL_NAN_CMD_ELECTION_RSSI_THRESHOLD
);
1586 sub_cmd
->len
= htod16(sizeof(sub_cmd
->u
.options
) + sizeof(*rssi_thld
));
1587 sub_cmd
->u
.options
= htod32(BCM_XTLV_OPTION_ALIGN32
);
1589 nan_iov_data
->nan_iov_len
-= subcmd_len
;
1590 nan_iov_data
->nan_iov_buf
+= subcmd_len
;
1597 check_for_valid_5gchan(struct net_device
*ndev
, uint8 chan
)
1601 u8 ioctl_buf
[WLC_IOCTL_SMLEN
];
1602 uint32 chanspec_arg
;
1605 chanspec_arg
= CH20MHZ_CHSPEC(chan
);
1606 chanspec_arg
= wl_chspec_host_to_driver(chanspec_arg
);
1607 memset(ioctl_buf
, 0, WLC_IOCTL_SMLEN
);
1608 ret
= wldev_iovar_getbuf(ndev
, "per_chan_info", (void *)&chanspec_arg
, sizeof(chanspec_arg
),
1609 ioctl_buf
, WLC_IOCTL_SMLEN
, NULL
);
1610 if (ret
!= BCME_OK
) {
1611 WL_ERR(("Chaninfo for channel = %d, error %d\n", chan
, ret
));
1615 bitmap
= dtoh32(*(uint
*)ioctl_buf
);
1616 if (!(bitmap
& WL_CHAN_VALID_HW
)) {
1617 WL_ERR(("Invalid channel\n"));
1622 if (!(bitmap
& WL_CHAN_VALID_SW
)) {
1623 WL_ERR(("Not supported in current locale\n"));
1633 wl_cfgnan_set_nan_soc_chans(struct net_device
*ndev
, nan_config_cmd_data_t
*cmd_data
,
1634 wl_nan_iov_t
*nan_iov_data
, uint32 nan_attr_mask
)
1637 bcm_iov_batch_subcmd_t
*sub_cmd
= NULL
;
1638 wl_nan_social_channels_t
*soc_chans
= NULL
;
1643 sub_cmd
= (bcm_iov_batch_subcmd_t
*)(nan_iov_data
->nan_iov_buf
);
1645 (wl_nan_social_channels_t
*)sub_cmd
->data
;
1647 ret
= wl_cfg_nan_check_cmd_len(nan_iov_data
->nan_iov_len
,
1648 sizeof(*soc_chans
), &subcmd_len
);
1649 if (unlikely(ret
)) {
1650 WL_ERR(("nan_sub_cmd check failed\n"));
1654 sub_cmd
->id
= htod16(WL_NAN_CMD_SYNC_SOCIAL_CHAN
);
1655 sub_cmd
->len
= sizeof(sub_cmd
->u
.options
) +
1657 sub_cmd
->u
.options
= htol32(BCM_XTLV_OPTION_ALIGN32
);
1658 if (nan_attr_mask
& NAN_ATTR_2G_CHAN_CONFIG
) {
1659 soc_chans
->soc_chan_2g
= cmd_data
->chanspec
[1];
1661 soc_chans
->soc_chan_2g
= NAN_DEF_SOCIAL_CHAN_2G
;
1664 if (cmd_data
->support_5g
) {
1665 if (nan_attr_mask
& NAN_ATTR_5G_CHAN_CONFIG
) {
1666 soc_chans
->soc_chan_5g
= cmd_data
->chanspec
[2];
1668 soc_chans
->soc_chan_5g
= NAN_DEF_SOCIAL_CHAN_5G
;
1670 ret
= check_for_valid_5gchan(ndev
, soc_chans
->soc_chan_5g
);
1671 if (ret
!= BCME_OK
) {
1672 ret
= check_for_valid_5gchan(ndev
, NAN_DEF_SEC_SOCIAL_CHAN_5G
);
1673 if (ret
== BCME_OK
) {
1674 soc_chans
->soc_chan_5g
= NAN_DEF_SEC_SOCIAL_CHAN_5G
;
1676 soc_chans
->soc_chan_5g
= 0;
1678 WL_ERR(("Current locale doesn't support 5G op"
1679 "continuing with 2G only operation\n"));
1683 WL_DBG(("5G support is disabled\n"));
1685 nan_iov_data
->nan_iov_len
-= subcmd_len
;
1686 nan_iov_data
->nan_iov_buf
+= subcmd_len
;
1693 wl_cfgnan_set_nan_scan_params(struct net_device
*ndev
, struct bcm_cfg80211
*cfg
,
1694 nan_config_cmd_data_t
*cmd_data
, uint8 band_index
, uint32 nan_attr_mask
)
1696 bcm_iov_batch_buf_t
*nan_buf
= NULL
;
1698 uint16 nan_iov_start
, nan_iov_end
;
1699 uint16 nan_buf_size
= NAN_IOCTL_BUF_SIZE
;
1701 bcm_iov_batch_subcmd_t
*sub_cmd
= NULL
;
1702 wl_nan_iov_t
*nan_iov_data
= NULL
;
1703 uint8 resp_buf
[NAN_IOCTL_BUF_SIZE
];
1704 wl_nan_scan_params_t
*scan_params
= NULL
;
1707 dhd_pub_t
*dhdp
= wl_cfg80211_get_dhdp(ndev
);
1711 nan_buf
= MALLOCZ(dhdp
->osh
, nan_buf_size
);
1713 WL_ERR(("%s: memory allocation failed\n", __func__
));
1718 nan_iov_data
= MALLOCZ(dhdp
->osh
, sizeof(*nan_iov_data
));
1719 if (!nan_iov_data
) {
1720 WL_ERR(("%s: memory allocation failed\n", __func__
));
1725 nan_iov_data
->nan_iov_len
= nan_iov_start
= NAN_IOCTL_BUF_SIZE
;
1726 nan_buf
->version
= htol16(WL_NAN_IOV_BATCH_VERSION
);
1728 nan_iov_data
->nan_iov_buf
= (uint8
*)(&nan_buf
->cmds
[0]);
1729 nan_iov_data
->nan_iov_len
-= OFFSETOF(bcm_iov_batch_buf_t
, cmds
[0]);
1730 sub_cmd
= (bcm_iov_batch_subcmd_t
*)(nan_iov_data
->nan_iov_buf
);
1732 ret
= wl_cfg_nan_check_cmd_len(nan_iov_data
->nan_iov_len
,
1733 sizeof(*scan_params
), &subcmd_len
);
1734 if (unlikely(ret
)) {
1735 WL_ERR(("nan_sub_cmd check failed\n"));
1738 scan_params
= (wl_nan_scan_params_t
*)sub_cmd
->data
;
1740 sub_cmd
->id
= htod16(WL_NAN_CMD_CFG_SCAN_PARAMS
);
1741 sub_cmd
->len
= sizeof(sub_cmd
->u
.options
) + sizeof(*scan_params
);
1742 sub_cmd
->u
.options
= htol32(BCM_XTLV_OPTION_ALIGN32
);
1745 /* Fw default: Dwell time for 2G is 210 */
1746 if ((nan_attr_mask
& NAN_ATTR_2G_DWELL_TIME_CONFIG
) &&
1747 cmd_data
->dwell_time
[0]) {
1748 scan_params
->dwell_time
= cmd_data
->dwell_time
[0] +
1749 NAN_SCAN_DWELL_TIME_DELTA_MS
;
1751 /* Fw default: Scan period for 2G is 10 */
1752 if (nan_attr_mask
& NAN_ATTR_2G_SCAN_PERIOD_CONFIG
) {
1753 scan_params
->scan_period
= cmd_data
->scan_period
[0];
1756 if ((nan_attr_mask
& NAN_ATTR_5G_DWELL_TIME_CONFIG
) &&
1757 cmd_data
->dwell_time
[1]) {
1758 scan_params
->dwell_time
= cmd_data
->dwell_time
[1] +
1759 NAN_SCAN_DWELL_TIME_DELTA_MS
;
1761 if (nan_attr_mask
& NAN_ATTR_5G_SCAN_PERIOD_CONFIG
) {
1762 scan_params
->scan_period
= cmd_data
->scan_period
[1];
1765 scan_params
->band_index
= band_index
;
1766 nan_buf
->is_set
= true;
1769 /* Reduce the iov_len size by subcmd_len */
1770 nan_iov_data
->nan_iov_len
-= subcmd_len
;
1771 nan_iov_end
= nan_iov_data
->nan_iov_len
;
1772 nan_buf_size
= (nan_iov_start
- nan_iov_end
);
1774 memset(resp_buf
, 0, sizeof(resp_buf
));
1775 ret
= wl_cfgnan_execute_ioctl(ndev
, cfg
, nan_buf
, nan_buf_size
, &status
,
1776 (void*)resp_buf
, NAN_IOCTL_BUF_SIZE
);
1777 if (unlikely(ret
) || unlikely(status
)) {
1778 WL_ERR(("set nan scan params failed ret %d status %d \n", ret
, status
));
1781 WL_DBG(("set nan scan params successfull\n"));
1784 MFREE(dhdp
->osh
, nan_buf
, NAN_IOCTL_BUF_SIZE
);
1787 MFREE(dhdp
->osh
, nan_iov_data
, sizeof(*nan_iov_data
));
1795 wl_cfgnan_set_cluster_id(nan_config_cmd_data_t
*cmd_data
,
1796 wl_nan_iov_t
*nan_iov_data
)
1799 bcm_iov_batch_subcmd_t
*sub_cmd
= NULL
;
1804 sub_cmd
= (bcm_iov_batch_subcmd_t
*)(nan_iov_data
->nan_iov_buf
);
1806 ret
= wl_cfg_nan_check_cmd_len(nan_iov_data
->nan_iov_len
,
1807 (sizeof(cmd_data
->clus_id
) - sizeof(uint8
)), &subcmd_len
);
1808 if (unlikely(ret
)) {
1809 WL_ERR(("nan_sub_cmd check failed\n"));
1813 cmd_data
->clus_id
.octet
[0] = 0x50;
1814 cmd_data
->clus_id
.octet
[1] = 0x6F;
1815 cmd_data
->clus_id
.octet
[2] = 0x9A;
1816 cmd_data
->clus_id
.octet
[3] = 0x01;
1817 WL_TRACE(("cluster_id = " MACDBG
"\n", MAC2STRDBG(cmd_data
->clus_id
.octet
)));
1819 sub_cmd
->id
= htod16(WL_NAN_CMD_CFG_CID
);
1820 sub_cmd
->len
= sizeof(sub_cmd
->u
.options
) + sizeof(cmd_data
->clus_id
);
1821 sub_cmd
->u
.options
= htol32(BCM_XTLV_OPTION_ALIGN32
);
1822 memcpy(sub_cmd
->data
, (uint8
*)&cmd_data
->clus_id
,
1823 sizeof(cmd_data
->clus_id
));
1825 nan_iov_data
->nan_iov_len
-= subcmd_len
;
1826 nan_iov_data
->nan_iov_buf
+= subcmd_len
;
1833 wl_cfgnan_set_hop_count_limit(nan_config_cmd_data_t
*cmd_data
,
1834 wl_nan_iov_t
*nan_iov_data
)
1837 bcm_iov_batch_subcmd_t
*sub_cmd
= NULL
;
1838 wl_nan_hop_count_t
*hop_limit
= NULL
;
1843 sub_cmd
= (bcm_iov_batch_subcmd_t
*)(nan_iov_data
->nan_iov_buf
);
1844 hop_limit
= (wl_nan_hop_count_t
*)sub_cmd
->data
;
1846 ret
= wl_cfg_nan_check_cmd_len(nan_iov_data
->nan_iov_len
,
1847 sizeof(*hop_limit
), &subcmd_len
);
1848 if (unlikely(ret
)) {
1849 WL_ERR(("nan_sub_cmd check failed\n"));
1853 *hop_limit
= cmd_data
->hop_count_limit
;
1854 sub_cmd
->id
= htod16(WL_NAN_CMD_CFG_HOP_LIMIT
);
1855 sub_cmd
->len
= sizeof(sub_cmd
->u
.options
) + sizeof(*hop_limit
);
1856 sub_cmd
->u
.options
= htol32(BCM_XTLV_OPTION_ALIGN32
);
1858 nan_iov_data
->nan_iov_len
-= subcmd_len
;
1859 nan_iov_data
->nan_iov_buf
+= subcmd_len
;
1866 wl_cfgnan_set_sid_beacon_val(nan_config_cmd_data_t
*cmd_data
,
1867 wl_nan_iov_t
*nan_iov_data
, uint32 nan_attr_mask
)
1870 bcm_iov_batch_subcmd_t
*sub_cmd
= NULL
;
1871 wl_nan_sid_beacon_control_t
*sid_beacon
= NULL
;
1876 sub_cmd
= (bcm_iov_batch_subcmd_t
*)(nan_iov_data
->nan_iov_buf
);
1878 ret
= wl_cfg_nan_check_cmd_len(nan_iov_data
->nan_iov_len
,
1879 sizeof(*sid_beacon
), &subcmd_len
);
1880 if (unlikely(ret
)) {
1881 WL_ERR(("nan_sub_cmd check failed\n"));
1885 sid_beacon
= (wl_nan_sid_beacon_control_t
*)sub_cmd
->data
;
1886 sid_beacon
->sid_enable
= cmd_data
->sid_beacon
.sid_enable
;
1887 /* Need to have separate flag for sub beacons
1888 * sid_beacon->sub_sid_enable = cmd_data->sid_beacon.sub_sid_enable;
1890 if (nan_attr_mask
& NAN_ATTR_SID_BEACON_CONFIG
) {
1891 /* Limit for number of publish SIDs to be included in Beacons */
1892 sid_beacon
->sid_count
= cmd_data
->sid_beacon
.sid_count
;
1894 if (nan_attr_mask
& NAN_ATTR_SUB_SID_BEACON_CONFIG
) {
1895 /* Limit for number of subscribe SIDs to be included in Beacons */
1896 sid_beacon
->sub_sid_count
= cmd_data
->sid_beacon
.sub_sid_count
;
1898 sub_cmd
->id
= htod16(WL_NAN_CMD_CFG_SID_BEACON
);
1899 sub_cmd
->len
= sizeof(sub_cmd
->u
.options
) +
1900 sizeof(*sid_beacon
);
1901 sub_cmd
->u
.options
= htol32(BCM_XTLV_OPTION_ALIGN32
);
1903 nan_iov_data
->nan_iov_len
-= subcmd_len
;
1904 nan_iov_data
->nan_iov_buf
+= subcmd_len
;
1910 wl_cfgnan_set_nan_oui(nan_config_cmd_data_t
*cmd_data
,
1911 wl_nan_iov_t
*nan_iov_data
)
1914 bcm_iov_batch_subcmd_t
*sub_cmd
= NULL
;
1919 sub_cmd
= (bcm_iov_batch_subcmd_t
*)(nan_iov_data
->nan_iov_buf
);
1921 ret
= wl_cfg_nan_check_cmd_len(nan_iov_data
->nan_iov_len
,
1922 sizeof(cmd_data
->nan_oui
), &subcmd_len
);
1923 if (unlikely(ret
)) {
1924 WL_ERR(("nan_sub_cmd check failed\n"));
1928 sub_cmd
->id
= htod16(WL_NAN_CMD_CFG_OUI
);
1929 sub_cmd
->len
= sizeof(sub_cmd
->u
.options
) + sizeof(cmd_data
->nan_oui
);
1930 sub_cmd
->u
.options
= htol32(BCM_XTLV_OPTION_ALIGN32
);
1931 memcpy(sub_cmd
->data
, (uint32
*)&cmd_data
->nan_oui
,
1932 sizeof(cmd_data
->nan_oui
));
1934 nan_iov_data
->nan_iov_len
-= subcmd_len
;
1935 nan_iov_data
->nan_iov_buf
+= subcmd_len
;
1941 wl_cfgnan_set_awake_dws(struct net_device
*ndev
, nan_config_cmd_data_t
*cmd_data
,
1942 wl_nan_iov_t
*nan_iov_data
, struct bcm_cfg80211
*cfg
, uint32 nan_attr_mask
)
1945 bcm_iov_batch_subcmd_t
*sub_cmd
= NULL
;
1946 wl_nan_awake_dws_t
*awake_dws
= NULL
;
1951 (bcm_iov_batch_subcmd_t
*)(nan_iov_data
->nan_iov_buf
);
1952 ret
= wl_cfg_nan_check_cmd_len(nan_iov_data
->nan_iov_len
,
1953 sizeof(*awake_dws
), &subcmd_len
);
1954 if (unlikely(ret
)) {
1955 WL_ERR(("nan_sub_cmd check failed\n"));
1959 awake_dws
= (wl_nan_awake_dws_t
*)sub_cmd
->data
;
1961 if (nan_attr_mask
& NAN_ATTR_2G_DW_CONFIG
) {
1962 awake_dws
->dw_interval_2g
= cmd_data
->awake_dws
.dw_interval_2g
;
1963 if (!awake_dws
->dw_interval_2g
) {
1964 /* Set 2G awake dw value to fw default value 1 */
1965 awake_dws
->dw_interval_2g
= NAN_SYNC_DEF_AWAKE_DW
;
1968 /* Set 2G awake dw value to fw default value 1 */
1969 awake_dws
->dw_interval_2g
= NAN_SYNC_DEF_AWAKE_DW
;
1972 if (cfg
->support_5g
) {
1973 if (nan_attr_mask
& NAN_ATTR_5G_DW_CONFIG
) {
1974 awake_dws
->dw_interval_5g
= cmd_data
->awake_dws
.dw_interval_5g
;
1975 if (!awake_dws
->dw_interval_5g
) {
1976 /* disable 5g beacon ctrls */
1977 ret
= wl_cfgnan_config_control_flag(ndev
, cfg
,
1978 WL_NAN_CTRL_DISC_BEACON_TX_5G
,
1979 &(cmd_data
->status
), 0);
1980 if (unlikely(ret
) || unlikely(cmd_data
->status
)) {
1981 WL_ERR((" nan control set config handler,"
1982 " ret = %d status = %d \n",
1983 ret
, cmd_data
->status
));
1986 ret
= wl_cfgnan_config_control_flag(ndev
, cfg
,
1987 WL_NAN_CTRL_SYNC_BEACON_TX_5G
,
1988 &(cmd_data
->status
), 0);
1989 if (unlikely(ret
) || unlikely(cmd_data
->status
)) {
1990 WL_ERR((" nan control set config handler,"
1991 " ret = %d status = %d \n",
1992 ret
, cmd_data
->status
));
1997 /* Set 5G awake dw value to fw default value 1 */
1998 awake_dws
->dw_interval_5g
= NAN_SYNC_DEF_AWAKE_DW
;
2002 sub_cmd
->id
= htod16(WL_NAN_CMD_SYNC_AWAKE_DWS
);
2003 sub_cmd
->len
= sizeof(sub_cmd
->u
.options
) +
2005 sub_cmd
->u
.options
= htol32(BCM_XTLV_OPTION_ALIGN32
);
2007 nan_iov_data
->nan_iov_len
-= subcmd_len
;
2008 nan_iov_data
->nan_iov_buf
+= subcmd_len
;
2016 wl_cfgnan_start_handler(struct net_device
*ndev
, struct bcm_cfg80211
*cfg
,
2017 nan_config_cmd_data_t
*cmd_data
, uint32 nan_attr_mask
)
2020 uint16 nan_buf_size
= NAN_IOCTL_BUF_SIZE
;
2021 bcm_iov_batch_buf_t
*nan_buf
= NULL
;
2022 wl_nan_iov_t
*nan_iov_data
= NULL
;
2023 dhd_pub_t
*dhdp
= wl_cfg80211_get_dhdp(ndev
);
2024 uint8 resp_buf
[NAN_IOCTL_BUF_SIZE
];
2027 bool mutex_locked
= false;
2031 mutex_locked
= true;
2033 if (!wl_cfg80211_check_for_nan_support(cfg
)) {
2034 ret
= BCME_UNSUPPORTED
;
2039 ret
= wl_cfg80211_deinit_p2p_discovery(cfg
);
2040 if (ret
!= BCME_OK
) {
2041 WL_ERR(("Failed to disable p2p_disc during nan_enab"));
2043 WL_ERR(("Initializing NAN\n"));
2044 ret
= wl_cfgnan_init(cfg
);
2045 if (ret
!= BCME_OK
) {
2046 WL_ERR(("failed to initialize NAN[%d]\n", ret
));
2051 ret
= wl_cfgnan_set_if_addr(cfg
);
2052 if (ret
!= BCME_OK
) {
2053 WL_ERR(("Failed to set nmi address \n"));
2057 for (i
= 0; i
< NAN_MAX_NDI
; i
++) {
2058 /* Create NDI using the information provided by user space */
2059 if (cfg
->nancfg
.ndi
[i
].in_use
&& !cfg
->nancfg
.ndi
[i
].created
) {
2060 ret
= wl_cfgnan_data_path_iface_create_delete_handler(ndev
, cfg
,
2061 cfg
->nancfg
.ndi
[i
].ifname
,
2062 NAN_WIFI_SUBCMD_DATA_PATH_IFACE_CREATE
, dhdp
->up
);
2064 WL_ERR(("failed to create ndp interface [%d]\n", ret
));
2067 cfg
->nancfg
.ndi
[i
].created
= true;
2071 nan_buf
= MALLOCZ(dhdp
->osh
, nan_buf_size
);
2073 WL_ERR(("%s: memory allocation failed\n", __func__
));
2078 nan_iov_data
= MALLOCZ(dhdp
->osh
, sizeof(*nan_iov_data
));
2079 if (!nan_iov_data
) {
2080 WL_ERR(("%s: memory allocation failed\n", __func__
));
2085 nan_iov_data
->nan_iov_len
= NAN_IOCTL_BUF_SIZE
;
2086 nan_buf
->version
= htol16(WL_NAN_IOV_BATCH_VERSION
);
2088 nan_iov_data
->nan_iov_buf
= (uint8
*)(&nan_buf
->cmds
[0]);
2089 nan_iov_data
->nan_iov_len
-= OFFSETOF(bcm_iov_batch_buf_t
, cmds
[0]);
2091 if (nan_attr_mask
& NAN_ATTR_SYNC_DISC_2G_BEACON_CONFIG
) {
2092 /* config sync/discovery beacons on 2G band */
2093 /* 2g is mandatory */
2094 if (!cmd_data
->beacon_2g_val
) {
2095 WL_ERR(("Invalid NAN config...2G is mandatory\n"));
2098 ret
= wl_cfgnan_config_control_flag(ndev
, cfg
,
2099 WL_NAN_CTRL_DISC_BEACON_TX_2G
| WL_NAN_CTRL_SYNC_BEACON_TX_2G
,
2100 &(cmd_data
->status
), TRUE
);
2101 if (unlikely(ret
) || unlikely(cmd_data
->status
)) {
2102 WL_ERR((" nan control set config handler, ret = %d status = %d \n",
2103 ret
, cmd_data
->status
));
2107 if (nan_attr_mask
& NAN_ATTR_SYNC_DISC_5G_BEACON_CONFIG
) {
2108 /* config sync/discovery beacons on 5G band */
2109 ret
= wl_cfgnan_config_control_flag(ndev
, cfg
,
2110 WL_NAN_CTRL_DISC_BEACON_TX_5G
| WL_NAN_CTRL_SYNC_BEACON_TX_5G
,
2111 &(cmd_data
->status
), cmd_data
->beacon_5g_val
);
2112 if (unlikely(ret
) || unlikely(cmd_data
->status
)) {
2113 WL_ERR((" nan control set config handler, ret = %d status = %d \n",
2114 ret
, cmd_data
->status
));
2118 /* Setting warm up time */
2119 cmd_data
->warmup_time
= 1;
2120 if (cmd_data
->warmup_time
) {
2121 ret
= wl_cfgnan_warmup_time_handler(cmd_data
, nan_iov_data
);
2122 if (unlikely(ret
)) {
2123 WL_ERR(("warm up time handler sub_cmd set failed\n"));
2128 /* setting master preference and random factor */
2129 ret
= wl_cfgnan_set_election_metric(cmd_data
, nan_iov_data
, nan_attr_mask
);
2130 if (unlikely(ret
)) {
2131 WL_ERR(("election_metric sub_cmd set failed\n"));
2137 /* setting nan social channels */
2138 ret
= wl_cfgnan_set_nan_soc_chans(ndev
, cmd_data
, nan_iov_data
, nan_attr_mask
);
2139 if (unlikely(ret
)) {
2140 WL_ERR(("nan social channels set failed\n"));
2143 /* Storing 5g capability which is reqd for avail chan config. */
2144 cfg
->support_5g
= cmd_data
->support_5g
;
2148 if ((cmd_data
->support_2g
) && ((cmd_data
->dwell_time
[0]) ||
2149 (cmd_data
->scan_period
[0]))) {
2150 /* setting scan params */
2151 ret
= wl_cfgnan_set_nan_scan_params(ndev
, cfg
, cmd_data
, 0, nan_attr_mask
);
2152 if (unlikely(ret
)) {
2153 WL_ERR(("scan params set failed for 2g\n"));
2158 if ((cmd_data
->support_5g
) && ((cmd_data
->dwell_time
[1]) ||
2159 (cmd_data
->scan_period
[1]))) {
2160 /* setting scan params */
2161 ret
= wl_cfgnan_set_nan_scan_params(ndev
, cfg
, cmd_data
,
2162 cmd_data
->support_5g
, nan_attr_mask
);
2163 if (unlikely(ret
)) {
2164 WL_ERR(("scan params set failed for 5g\n"));
2170 * A cluster_low value matching cluster_high indicates a request
2171 * to join a cluster with that value.
2172 * If the requested cluster is not found the
2173 * device will start its own cluster
2175 /* For Debug purpose, using clust id compulsion */
2176 if (!ETHER_ISNULLADDR(&cmd_data
->clus_id
.octet
)) {
2177 if ((cmd_data
->clus_id
.octet
[4] == cmd_data
->clus_id
.octet
[5])) {
2178 /* device will merge to configured CID only */
2179 ret
= wl_cfgnan_config_control_flag(ndev
, cfg
,
2180 WL_NAN_CTRL_MERGE_CONF_CID_ONLY
, &(cmd_data
->status
), true);
2181 if (unlikely(ret
) || unlikely(cmd_data
->status
)) {
2182 WL_ERR((" nan control set config handler, ret = %d status = %d \n",
2183 ret
, cmd_data
->status
));
2187 /* setting cluster ID */
2188 ret
= wl_cfgnan_set_cluster_id(cmd_data
, nan_iov_data
);
2189 if (unlikely(ret
)) {
2190 WL_ERR(("cluster_id sub_cmd set failed\n"));
2196 /* setting rssi proximaty values for 2.4GHz and 5GHz */
2197 ret
= wl_cfgnan_set_rssi_proximity(cmd_data
, nan_iov_data
, nan_attr_mask
);
2198 if (unlikely(ret
)) {
2199 WL_ERR(("2.4GHz/5GHz rssi proximity threshold set failed\n"));
2205 /* setting rssi middle/close values for 2.4GHz and 5GHz */
2206 ret
= wl_cfgnan_set_rssi_mid_or_close(cmd_data
, nan_iov_data
, nan_attr_mask
);
2207 if (unlikely(ret
)) {
2208 WL_ERR(("2.4GHz/5GHz rssi middle and close set failed\n"));
2214 /* setting hop count limit or threshold */
2215 if (nan_attr_mask
& NAN_ATTR_HOP_COUNT_LIMIT_CONFIG
) {
2216 ret
= wl_cfgnan_set_hop_count_limit(cmd_data
, nan_iov_data
);
2217 if (unlikely(ret
)) {
2218 WL_ERR(("hop_count_limit sub_cmd set failed\n"));
2224 /* setting sid beacon val */
2225 if ((nan_attr_mask
& NAN_ATTR_SID_BEACON_CONFIG
) ||
2226 (nan_attr_mask
& NAN_ATTR_SUB_SID_BEACON_CONFIG
)) {
2227 ret
= wl_cfgnan_set_sid_beacon_val(cmd_data
, nan_iov_data
, nan_attr_mask
);
2228 if (unlikely(ret
)) {
2229 WL_ERR(("sid_beacon sub_cmd set failed\n"));
2235 /* setting nan oui */
2236 if (nan_attr_mask
& NAN_ATTR_OUI_CONFIG
) {
2237 ret
= wl_cfgnan_set_nan_oui(cmd_data
, nan_iov_data
);
2238 if (unlikely(ret
)) {
2239 WL_ERR(("nan_oui sub_cmd set failed\n"));
2245 /* setting nan awake dws */
2246 ret
= wl_cfgnan_set_awake_dws(ndev
, cmd_data
,
2247 nan_iov_data
, cfg
, nan_attr_mask
);
2248 if (unlikely(ret
)) {
2249 WL_ERR(("nan awake dws set failed\n"));
2256 ret
= wl_cfgnan_config_eventmask(ndev
, cfg
, cmd_data
->disc_ind_cfg
, false);
2257 if (unlikely(ret
)) {
2261 /* setting nan enable sub_cmd */
2262 ret
= wl_cfgnan_enable_handler(nan_iov_data
, true);
2263 if (unlikely(ret
)) {
2264 WL_ERR(("enable handler sub_cmd set failed\n"));
2268 nan_buf
->is_set
= true;
2270 nan_buf_size
-= nan_iov_data
->nan_iov_len
;
2271 memset(resp_buf
, 0, sizeof(resp_buf
));
2272 mutex_locked
= false;
2273 /* Reset conditon variable */
2274 cfg
->nancfg
.nan_event_recvd
= false;
2275 /* Releasing lock to allow event processing */
2277 ret
= wl_cfgnan_execute_ioctl(ndev
, cfg
, nan_buf
, nan_buf_size
,
2278 &(cmd_data
->status
), (void*)resp_buf
, NAN_IOCTL_BUF_SIZE
);
2279 if (unlikely(ret
) || unlikely(cmd_data
->status
)) {
2280 WL_ERR((" nan start handler, enable failed, ret = %d status = %d \n",
2281 ret
, cmd_data
->status
));
2284 timeout
= wait_event_timeout(cfg
->nancfg
.nan_event_wait
,
2285 cfg
->nancfg
.nan_event_recvd
, msecs_to_jiffies(NAN_START_STOP_TIMEOUT
));
2287 WL_ERR(("Timed out while Waiting for WL_NAN_EVENT_START event !!!\n"));
2292 /* If set, auto datapath confirms will be sent by FW */
2293 ret
= wl_cfgnan_config_control_flag(ndev
, cfg
, WL_NAN_CTRL_AUTO_DPCONF
,
2294 &(cmd_data
->status
), true);
2295 if (unlikely(ret
) || unlikely(cmd_data
->status
)) {
2296 WL_ERR((" nan control set config handler, ret = %d status = %d \n",
2297 ret
, cmd_data
->status
));
2300 WL_INFORM_MEM(("[NAN] Enable successfull \n"));
2302 /* reset conditon variable */
2303 cfg
->nancfg
.nan_event_recvd
= false;
2304 if (unlikely(ret
) || unlikely(cmd_data
->status
)) {
2305 for (i
= 0; i
< NAN_MAX_NDI
; i
++) {
2306 if (cfg
->nancfg
.ndi
[i
].in_use
&& cfg
->nancfg
.ndi
[i
].created
) {
2307 WL_INFORM_MEM(("Deleting NAN NDI IDX:%d\n", i
));
2308 ret
= wl_cfgnan_data_path_iface_create_delete_handler(ndev
, cfg
,
2309 (char*)cfg
->nancfg
.ndi
[i
].ifname
,
2310 NAN_WIFI_SUBCMD_DATA_PATH_IFACE_DELETE
, dhdp
->up
);
2312 WL_ERR(("failed to delete ndp iface [%d]\n", ret
));
2318 MFREE(dhdp
->osh
, nan_buf
, NAN_IOCTL_BUF_SIZE
);
2321 MFREE(dhdp
->osh
, nan_iov_data
, sizeof(*nan_iov_data
));
2332 wl_cfgnan_disable(struct bcm_cfg80211
*cfg
, nan_stop_reason_code_t reason
)
2335 dhd_pub_t
*dhdp
= (dhd_pub_t
*)(cfg
->pub
);
2339 if (cfg
->nan_enable
) {
2340 struct net_device
*ndev
;
2341 ndev
= bcmcfg_to_prmry_ndev(cfg
);
2342 cfg
->nancfg
.disable_reason
= reason
;
2343 ret
= wl_cfgnan_stop_handler(ndev
, cfg
, false);
2344 if (ret
!= BCME_OK
) {
2345 WL_ERR(("failed to stop nan, error[%d]\n", ret
));
2347 /* We have to remove NDIs so that P2P/Softap can work */
2348 for (i
= 0; i
< NAN_MAX_NDI
; i
++) {
2349 if (cfg
->nancfg
.ndi
[i
].in_use
&& cfg
->nancfg
.ndi
[i
].created
) {
2350 WL_INFORM_MEM(("Deleting NAN NDI IDX:%d\n", i
));
2351 ret
= wl_cfgnan_data_path_iface_create_delete_handler(ndev
, cfg
,
2352 (char*)cfg
->nancfg
.ndi
[i
].ifname
,
2353 NAN_WIFI_SUBCMD_DATA_PATH_IFACE_DELETE
, dhdp
->up
);
2355 WL_ERR(("failed to delete ndp iface [%d]\n", ret
));
2357 cfg
->nancfg
.ndi
[i
].created
= false;
2360 ret
= wl_cfgnan_deinit(cfg
, dhdp
->up
);
2361 if (ret
!= BCME_OK
) {
2362 WL_ERR(("failed to de-initialize NAN[%d]\n", ret
));
2370 wl_cfgnan_send_stop_event(nan_event_data_t
*nan_event_data
, struct bcm_cfg80211
*cfg
)
2374 memset(nan_event_data
, 0, NAN_IOCTL_BUF_SIZE
);
2375 nan_event_data
->status
= NAN_STATUS_SUCCESS
;
2376 memcpy(nan_event_data
->nan_reason
, "NAN_STATUS_SUCCESS",
2377 strlen("NAN_STATUS_SUCCESS"));
2378 #if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
2379 ret
= wl_cfgvendor_send_nan_event(cfg
->wdev
->wiphy
, bcmcfg_to_prmry_ndev(cfg
),
2380 GOOGLE_NAN_EVENT_DISABLED
, nan_event_data
);
2381 if (ret
!= BCME_OK
) {
2382 WL_ERR(("Failed to send event to nan hal, (%d)\n",
2383 GOOGLE_NAN_EVENT_DISABLED
));
2385 #endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
2386 WL_INFORM(("Sending disabled event if Bus is down\n"));
2387 /* Resetting instance ID mask */
2388 cfg
->nancfg
.inst_id_start
= 0;
2389 memset(cfg
->nancfg
.svc_inst_id_mask
, 0, sizeof(cfg
->nancfg
.svc_inst_id_mask
));
2390 memset(cfg
->svc_info
, 0, NAN_MAX_SVC_INST
* sizeof(nan_svc_info_t
));
2391 cfg
->nan_enable
= false;
2397 wl_cfgnan_stop_handler(struct net_device
*ndev
,
2398 struct bcm_cfg80211
*cfg
, bool disable_events
)
2400 bcm_iov_batch_buf_t
*nan_buf
= NULL
;
2401 s32 ret
= BCME_ERROR
;
2402 uint16 nan_buf_size
= NAN_IOCTL_BUF_SIZE
;
2403 wl_nan_iov_t
*nan_iov_data
= NULL
;
2405 uint8 resp_buf
[NAN_IOCTL_BUF_SIZE
];
2406 dhd_pub_t
*dhdp
= wl_cfg80211_get_dhdp(ndev
);
2407 uint8 buf
[NAN_IOCTL_BUF_SIZE
];
2408 nan_event_data_t
*nan_event_data
= (nan_event_data_t
*)buf
;
2410 bool mutex_locked
= false;
2414 mutex_locked
= true;
2416 if (!cfg
->nan_enable
) {
2417 WL_INFORM(("Nan is not enabled\n"));
2422 if (cfg
->nancfg
.disable_reason
!= NAN_BUS_IS_DOWN
) {
2424 * Framework doing cleanup(iface remove) on disable command,
2425 * so avoiding event to prevent iface delete calls again
2427 if (disable_events
) {
2428 WL_INFORM_MEM(("[NAN] Disabling Nan events\n"));
2429 wl_cfgnan_config_eventmask(ndev
, cfg
, 0, true);
2431 nan_buf
= MALLOCZ(dhdp
->osh
, nan_buf_size
);
2433 WL_ERR(("%s: memory allocation failed\n", __func__
));
2438 nan_iov_data
= MALLOCZ(dhdp
->osh
, sizeof(*nan_iov_data
));
2439 if (!nan_iov_data
) {
2440 WL_ERR(("%s: memory allocation failed\n", __func__
));
2445 nan_iov_data
->nan_iov_len
= NAN_IOCTL_BUF_SIZE
;
2446 nan_buf
->version
= htol16(WL_NAN_IOV_BATCH_VERSION
);
2448 nan_iov_data
->nan_iov_buf
= (uint8
*)(&nan_buf
->cmds
[0]);
2449 nan_iov_data
->nan_iov_len
-= OFFSETOF(bcm_iov_batch_buf_t
, cmds
[0]);
2451 ret
= wl_cfgnan_enable_handler(nan_iov_data
, false);
2452 if (unlikely(ret
)) {
2453 WL_ERR(("nan disable handler failed\n"));
2457 nan_buf
->is_set
= true;
2458 nan_buf_size
-= nan_iov_data
->nan_iov_len
;
2459 memset(resp_buf
, 0, sizeof(resp_buf
));
2460 mutex_locked
= false;
2461 /* reset conditon variable */
2462 cfg
->nancfg
.nan_event_recvd
= false;
2463 /* Releasing lock to allow event processing */
2465 ret
= wl_cfgnan_execute_ioctl(ndev
, cfg
, nan_buf
, nan_buf_size
, &status
,
2466 (void*)resp_buf
, NAN_IOCTL_BUF_SIZE
);
2467 if (unlikely(ret
) || unlikely(status
)) {
2468 WL_ERR(("nan disable failed ret = %d status = %d\n", ret
, status
));
2471 cfg
->nan_enable
= false;
2472 timeout
= wait_event_timeout(cfg
->nancfg
.nan_event_wait
,
2473 cfg
->nancfg
.nan_event_recvd
,
2474 msecs_to_jiffies(NAN_START_STOP_TIMEOUT
));
2476 WL_ERR(("Timed out while Waiting for"
2477 " WL_NAN_EVENT_STOP event !!!\n"));
2481 WL_INFORM_MEM(("[NAN] Disable done\n"));
2483 /* Sending up NAN disabled event, to clear the nan state in framework */
2484 ret
= wl_cfgnan_send_stop_event(nan_event_data
, cfg
);
2487 /* reset conditon variable */
2488 cfg
->nancfg
.nan_event_recvd
= false;
2490 MFREE(dhdp
->osh
, nan_buf
, NAN_IOCTL_BUF_SIZE
);
2493 MFREE(dhdp
->osh
, nan_iov_data
, sizeof(*nan_iov_data
));
2503 wl_cfgnan_config_handler(struct net_device
*ndev
, struct bcm_cfg80211
*cfg
,
2504 nan_config_cmd_data_t
*cmd_data
, uint32 nan_attr_mask
)
2506 bcm_iov_batch_buf_t
*nan_buf
= NULL
;
2508 uint16 nan_buf_size
= NAN_IOCTL_BUF_SIZE
;
2509 wl_nan_iov_t
*nan_iov_data
= NULL
;
2510 dhd_pub_t
*dhdp
= wl_cfg80211_get_dhdp(ndev
);
2511 uint8 resp_buf
[NAN_IOCTL_BUF_SIZE
];
2515 /* Nan need to be enabled before configuring/updating params */
2516 if (cfg
->nan_enable
) {
2517 nan_buf
= MALLOCZ(dhdp
->osh
, nan_buf_size
);
2519 WL_ERR(("%s: memory allocation failed\n", __func__
));
2524 nan_iov_data
= MALLOCZ(dhdp
->osh
, sizeof(*nan_iov_data
));
2525 if (!nan_iov_data
) {
2526 WL_ERR(("%s: memory allocation failed\n", __func__
));
2531 nan_iov_data
->nan_iov_len
= NAN_IOCTL_BUF_SIZE
;
2532 nan_buf
->version
= htol16(WL_NAN_IOV_BATCH_VERSION
);
2534 nan_iov_data
->nan_iov_buf
= (uint8
*)(&nan_buf
->cmds
[0]);
2535 nan_iov_data
->nan_iov_len
-= OFFSETOF(bcm_iov_batch_buf_t
, cmds
[0]);
2537 /* setting sid beacon val */
2538 if ((nan_attr_mask
& NAN_ATTR_SID_BEACON_CONFIG
) ||
2539 (nan_attr_mask
& NAN_ATTR_SUB_SID_BEACON_CONFIG
)) {
2540 ret
= wl_cfgnan_set_sid_beacon_val(cmd_data
, nan_iov_data
, nan_attr_mask
);
2541 if (unlikely(ret
)) {
2542 WL_ERR(("sid_beacon sub_cmd set failed\n"));
2548 /* setting master preference and random factor */
2549 if (cmd_data
->metrics
.random_factor
||
2550 cmd_data
->metrics
.master_pref
) {
2551 ret
= wl_cfgnan_set_election_metric(cmd_data
, nan_iov_data
,
2553 if (unlikely(ret
)) {
2554 WL_ERR(("election_metric sub_cmd set failed\n"));
2561 /* setting hop count limit or threshold */
2562 if (nan_attr_mask
& NAN_ATTR_HOP_COUNT_LIMIT_CONFIG
) {
2563 ret
= wl_cfgnan_set_hop_count_limit(cmd_data
, nan_iov_data
);
2564 if (unlikely(ret
)) {
2565 WL_ERR(("hop_count_limit sub_cmd set failed\n"));
2571 /* setting rssi proximaty values for 2.4GHz and 5GHz */
2572 ret
= wl_cfgnan_set_rssi_proximity(cmd_data
, nan_iov_data
,
2574 if (unlikely(ret
)) {
2575 WL_ERR(("2.4GHz/5GHz rssi proximity threshold set failed\n"));
2581 /* setting nan awake dws */
2582 ret
= wl_cfgnan_set_awake_dws(ndev
, cmd_data
, nan_iov_data
,
2583 cfg
, nan_attr_mask
);
2584 if (unlikely(ret
)) {
2585 WL_ERR(("nan awake dws set failed\n"));
2591 if (cmd_data
->disc_ind_cfg
) {
2592 /* Disable events */
2593 WL_TRACE(("Disable events based on flag\n"));
2594 ret
= wl_cfgnan_config_eventmask(ndev
, cfg
,
2595 cmd_data
->disc_ind_cfg
, false);
2596 if (unlikely(ret
)) {
2601 if ((cfg
->support_5g
) && ((cmd_data
->dwell_time
[1]) ||
2602 (cmd_data
->scan_period
[1]))) {
2603 /* setting scan params */
2604 ret
= wl_cfgnan_set_nan_scan_params(ndev
, cfg
,
2605 cmd_data
, cfg
->support_5g
, nan_attr_mask
);
2606 if (unlikely(ret
)) {
2607 WL_ERR(("scan params set failed for 5g\n"));
2611 if ((cmd_data
->dwell_time
[0]) ||
2612 (cmd_data
->scan_period
[0])) {
2613 ret
= wl_cfgnan_set_nan_scan_params(ndev
, cfg
, cmd_data
, 0, nan_attr_mask
);
2614 if (unlikely(ret
)) {
2615 WL_ERR(("scan params set failed for 2g\n"));
2619 nan_buf
->is_set
= true;
2620 nan_buf_size
-= nan_iov_data
->nan_iov_len
;
2622 if (nan_buf
->count
) {
2623 memset(resp_buf
, 0, sizeof(resp_buf
));
2624 ret
= wl_cfgnan_execute_ioctl(ndev
, cfg
, nan_buf
, nan_buf_size
,
2625 &(cmd_data
->status
),
2626 (void*)resp_buf
, NAN_IOCTL_BUF_SIZE
);
2627 if (unlikely(ret
) || unlikely(cmd_data
->status
)) {
2628 WL_ERR((" nan config handler failed ret = %d status = %d\n",
2629 ret
, cmd_data
->status
));
2633 WL_DBG(("No commands to send\n"));
2636 if ((!cmd_data
->bmap
) || (cmd_data
->avail_params
.duration
== NAN_BAND_INVALID
) ||
2637 (!cmd_data
->chanspec
[0])) {
2638 WL_TRACE(("mandatory arguments are not present to set avail\n"));
2641 cmd_data
->avail_params
.chanspec
[0] = cmd_data
->chanspec
[0];
2642 cmd_data
->avail_params
.bmap
= cmd_data
->bmap
;
2643 /* 1=local, 2=peer, 3=ndc, 4=immutable, 5=response, 6=counter */
2644 ret
= wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg
),
2645 cfg
, &cmd_data
->avail_params
, WL_AVAIL_LOCAL
);
2646 if (unlikely(ret
)) {
2647 WL_ERR(("Failed to set avail value with type local\n"));
2651 ret
= wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg
),
2652 cfg
, &cmd_data
->avail_params
, WL_AVAIL_NDC
);
2653 if (unlikely(ret
)) {
2654 WL_ERR(("Failed to set avail value with type ndc\n"));
2659 WL_INFORM(("nan is not enabled\n"));
2664 MFREE(dhdp
->osh
, nan_buf
, NAN_IOCTL_BUF_SIZE
);
2667 MFREE(dhdp
->osh
, nan_iov_data
, sizeof(*nan_iov_data
));
2675 wl_cfgnan_support_handler(struct net_device
*ndev
,
2676 struct bcm_cfg80211
*cfg
, nan_config_cmd_data_t
*cmd_data
)
2683 wl_cfgnan_status_handler(struct net_device
*ndev
,
2684 struct bcm_cfg80211
*cfg
, nan_config_cmd_data_t
*cmd_data
)
2690 #ifdef WL_NAN_DISC_CACHE
2693 wl_cfgnan_get_svc_inst(struct bcm_cfg80211
*cfg
,
2694 wl_nan_instance_id svc_inst_id
, uint8 ndp_id
)
2698 for (i
= 0; i
< NAN_MAX_SVC_INST
; i
++) {
2699 for (j
= 0; j
< NAN_MAX_SVC_INST
; j
++) {
2700 if (cfg
->svc_info
[i
].ndp_id
[j
] == ndp_id
) {
2701 return &cfg
->svc_info
[i
];
2705 } else if (svc_inst_id
) {
2706 for (i
= 0; i
< NAN_MAX_SVC_INST
; i
++) {
2707 if (cfg
->svc_info
[i
].svc_id
== svc_inst_id
) {
2708 return &cfg
->svc_info
[i
];
2717 nan_ranging_inst_t
*
2718 wl_cfgnan_check_for_ranging(struct bcm_cfg80211
*cfg
, struct ether_addr
*peer
)
2722 for (i
= 0; i
< NAN_MAX_RANGING_INST
; i
++) {
2723 if (!memcmp(peer
, &cfg
->nan_ranging_info
[i
].peer_addr
,
2725 return &(cfg
->nan_ranging_info
[i
]);
2733 nan_ranging_inst_t
*
2734 wl_cfgnan_get_ranging_inst(struct bcm_cfg80211
*cfg
, struct ether_addr
*peer
,
2735 uint8 svc_id
, bool create
)
2737 nan_ranging_inst_t
*ranging_inst
= NULL
;
2740 ranging_inst
= wl_cfgnan_check_for_ranging(cfg
, peer
);
2745 WL_TRACE(("Creating Ranging instance \n"));
2746 for (i
= 0; i
< NAN_MAX_RANGING_INST
; i
++) {
2747 if (cfg
->nan_ranging_info
[i
].range_id
== 0)
2750 if (i
== NAN_MAX_RANGING_INST
) {
2751 WL_DBG(("No buffer available for the ranging instance"));
2754 ranging_inst
= &cfg
->nan_ranging_info
[i
];
2755 memcpy(&ranging_inst
->peer_addr
, peer
, ETHER_ADDR_LEN
);
2756 ranging_inst
->range_status
= NAN_RANGING_REQUIRED
;
2757 ranging_inst
->svc_inst_id
= svc_id
;
2761 return ranging_inst
;
2763 #endif /* WL_NAN_DISC_CACHE */
2766 process_resp_buf(void *iov_resp
,
2767 uint8
*instance_id
, uint16 sub_cmd_id
)
2772 if (sub_cmd_id
== WL_NAN_CMD_DATA_DATAREQ
) {
2773 wl_nan_dp_req_ret_t
*dpreq_ret
= NULL
;
2774 dpreq_ret
= (wl_nan_dp_req_ret_t
*)(iov_resp
);
2775 *instance_id
= dpreq_ret
->ndp_id
;
2776 WL_TRACE(("%s: Initiator NDI: " MACDBG
"\n",
2777 __FUNCTION__
, MAC2STRDBG(dpreq_ret
->indi
.octet
)));
2778 } else if (sub_cmd_id
== WL_NAN_CMD_RANGE_REQUEST
) {
2779 wl_nan_range_id
*range_id
= NULL
;
2780 range_id
= (wl_nan_range_id
*)(iov_resp
);
2781 *instance_id
= *range_id
;
2782 WL_TRACE(("Range id: %d\n", *range_id
));
2784 WL_DBG(("instance_id: %d\n", *instance_id
));
2789 #ifdef WL_NAN_DISC_CACHE
2791 wl_cfgnan_cancel_ranging(struct net_device
*ndev
,
2792 struct bcm_cfg80211
*cfg
, uint8 range_id
, uint32
*status
)
2794 bcm_iov_batch_buf_t
*nan_buf
= NULL
;
2796 uint16 nan_iov_start
, nan_iov_end
;
2797 uint16 nan_buf_size
= NAN_IOCTL_BUF_SIZE
;
2799 bcm_iov_batch_subcmd_t
*sub_cmd
= NULL
;
2800 wl_nan_iov_t
*nan_iov_data
= NULL
;
2801 uint8 resp_buf
[NAN_IOCTL_BUF_SIZE
];
2803 dhd_pub_t
*dhdp
= wl_cfg80211_get_dhdp(ndev
);
2807 nan_buf
= MALLOCZ(dhdp
->osh
, nan_buf_size
);
2809 WL_ERR(("%s: memory allocation failed\n", __func__
));
2814 nan_iov_data
= MALLOCZ(dhdp
->osh
, sizeof(*nan_iov_data
));
2815 if (!nan_iov_data
) {
2816 WL_ERR(("%s: memory allocation failed\n", __func__
));
2821 nan_iov_data
->nan_iov_len
= nan_iov_start
= NAN_IOCTL_BUF_SIZE
;
2822 nan_buf
->version
= htol16(WL_NAN_IOV_BATCH_VERSION
);
2824 nan_iov_data
->nan_iov_buf
= (uint8
*)(&nan_buf
->cmds
[0]);
2825 nan_iov_data
->nan_iov_len
-= OFFSETOF(bcm_iov_batch_buf_t
, cmds
[0]);
2826 sub_cmd
= (bcm_iov_batch_subcmd_t
*)(nan_iov_data
->nan_iov_buf
);
2828 ret
= wl_cfg_nan_check_cmd_len(nan_iov_data
->nan_iov_len
,
2829 sizeof(range_id
), &subcmd_len
);
2830 if (unlikely(ret
)) {
2831 WL_ERR(("nan_sub_cmd check failed\n"));
2835 sub_cmd
->id
= htod16(WL_NAN_CMD_RANGE_CANCEL
);
2836 sub_cmd
->len
= sizeof(sub_cmd
->u
.options
) + sizeof(range_id
);
2837 sub_cmd
->u
.options
= htol32(BCM_XTLV_OPTION_ALIGN32
);
2839 /* Reduce the iov_len size by subcmd_len */
2840 nan_iov_data
->nan_iov_len
-= subcmd_len
;
2841 nan_iov_end
= nan_iov_data
->nan_iov_len
;
2842 nan_buf_size
= (nan_iov_start
- nan_iov_end
);
2844 memcpy(sub_cmd
->data
, &range_id
, sizeof(range_id
));
2846 nan_buf
->is_set
= true;
2848 memset(resp_buf
, 0, sizeof(resp_buf
));
2849 ret
= wl_cfgnan_execute_ioctl(ndev
, cfg
, nan_buf
, nan_buf_size
, status
,
2850 (void*)resp_buf
, NAN_IOCTL_BUF_SIZE
);
2851 if (unlikely(ret
) || unlikely(*status
)) {
2852 WL_ERR(("Range cancel failed ret %d status %d \n", ret
, *status
));
2855 WL_INFORM(("Range cancel with Range ID [%d] successfull\n", range_id
));
2858 MFREE(dhdp
->osh
, nan_buf
, NAN_IOCTL_BUF_SIZE
);
2861 MFREE(dhdp
->osh
, nan_iov_data
, sizeof(*nan_iov_data
));
2868 wl_cfgnan_cache_svc_info(struct bcm_cfg80211
*cfg
,
2869 nan_discover_cmd_data_t
*cmd_data
, uint16 cmd_id
)
2873 nan_svc_info_t
*svc_info
;
2875 for (i
= 0; i
< NAN_MAX_SVC_INST
; i
++) {
2876 if (!cfg
->svc_info
[i
].svc_id
) {
2877 svc_info
= &cfg
->svc_info
[i
];
2881 if (i
== NAN_MAX_SVC_INST
) {
2882 WL_ERR(("%s:cannot accomodate ranging session\n", __FUNCTION__
));
2883 ret
= BCME_NORESOURCE
;
2886 if (cmd_data
->sde_control_flag
& NAN_SDE_CF_RANGING_REQUIRED
) {
2887 WL_TRACE(("%s:updating ranging info", __FUNCTION__
));
2888 svc_info
->status
= 1;
2889 svc_info
->ranging_interval
= cmd_data
->ranging_intvl_msec
;
2890 svc_info
->ranging_ind
= cmd_data
->ranging_indication
;
2891 svc_info
->ingress_limit
= cmd_data
->ingress_limit
;
2892 svc_info
->egress_limit
= cmd_data
->egress_limit
;
2893 svc_info
->ranging_required
= 1;
2895 if (cmd_id
== WL_NAN_CMD_SD_SUBSCRIBE
) {
2896 svc_info
->svc_id
= cmd_data
->sub_id
;
2897 if ((cmd_data
->flags
& WL_NAN_SUB_ACTIVE
) &&
2898 (cmd_data
->tx_match
.dlen
)) {
2899 memcpy(svc_info
->tx_match_filter
,
2900 cmd_data
->tx_match
.data
, cmd_data
->tx_match
.dlen
);
2901 svc_info
->tx_match_filter_len
= cmd_data
->tx_match
.dlen
;
2904 svc_info
->svc_id
= cmd_data
->pub_id
;
2906 memcpy(svc_info
->svc_hash
, cmd_data
->svc_hash
.data
, WL_NAN_SVC_HASH_LEN
);
2912 /* terminate all ranging sessions associated with a svc */
2914 wl_cfgnan_terminate_ranging_sessions(struct net_device
*ndev
,
2915 struct bcm_cfg80211
*cfg
, uint8 svc_id
)
2917 /* cancel all related ranging instances */
2921 nan_ranging_inst_t
*ranging_inst
;
2922 nan_svc_info_t
*svc
;
2923 for (i
= 0; i
< NAN_MAX_RANGING_INST
; i
++) {
2924 ranging_inst
= &cfg
->nan_ranging_info
[i
];
2925 if (ranging_inst
->range_id
&& ranging_inst
->svc_inst_id
== svc_id
) {
2926 ret
= wl_cfgnan_cancel_ranging(ndev
, cfg
, ranging_inst
->range_id
,
2928 if (unlikely(ret
) || unlikely(status
)) {
2929 WL_ERR(("%s:nan range cancel failed ret = %d status = %d\n",
2930 __FUNCTION__
, ret
, status
));
2932 memset(ranging_inst
, 0, sizeof(nan_ranging_inst_t
));
2933 WL_DBG(("Range cancelled \n"));
2937 /* clear command ranging info */
2938 svc
= wl_cfgnan_get_svc_inst(cfg
, svc_id
, 0);
2940 WL_DBG(("clearing cached svc info for svc id %d\n", svc_id
));
2941 memset(svc
, 0, sizeof(*svc
));
2947 wl_cfgnan_check_disc_res_for_ranging(struct bcm_cfg80211
*cfg
,
2948 nan_event_data_t
* nan_event_data
)
2950 nan_svc_info_t
*svc
;
2953 svc
= wl_cfgnan_get_svc_inst(cfg
, nan_event_data
->sub_id
, 0);
2955 if (svc
&& svc
->ranging_required
) {
2956 nan_ranging_inst_t
*ranging_inst
;
2957 ranging_inst
= wl_cfgnan_get_ranging_inst(cfg
,
2958 &nan_event_data
->remote_nmi
, nan_event_data
->sub_id
, TRUE
);
2959 if (ranging_inst
->range_status
!=
2960 NAN_RANGING_IN_PROGRESS
) {
2961 WL_DBG(("Trigger range request\n"));
2962 ret
= wl_cfgnan_trigger_ranging(bcmcfg_to_prmry_ndev(cfg
),
2963 cfg
, ranging_inst
, svc
, NAN_RANGE_REQ_CMD
);
2964 if (unlikely(ret
)) {
2965 WL_ERR(("Failed to trigger ranging, ret = (%d)\n", ret
));
2966 memset(ranging_inst
, 0, sizeof(*ranging_inst
));
2970 /* Disc event will be given on receving range_rpt event */
2971 WL_TRACE(("Disc event will given when Range RPT event is recvd"));
2973 ret
= BCME_UNSUPPORTED
;
2979 /* ranging reqeust event handler */
2981 wl_cfgnan_handle_ranging_ind(struct bcm_cfg80211
*cfg
,
2982 wl_nan_ev_rng_req_ind_t
*rng_ind
)
2985 nan_svc_info_t
*svc
= NULL
;
2986 nan_ranging_inst_t
*ranging_inst
;
2989 WL_DBG(("Trigger range response\n"));
2990 for (i
= 0; i
< NAN_MAX_RANGING_INST
; i
++) {
2991 if (cfg
->svc_info
[i
].ranging_required
) {
2992 svc
= &cfg
->svc_info
[i
];
2997 * no publisher indicated ranging support,
2998 * ignoring ranging request for now
3000 WL_TRACE(("No publisher has ranging supported.so will reject in trigger api"));
3004 ranging_inst
= wl_cfgnan_get_ranging_inst(cfg
, &rng_ind
->peer_m_addr
,
3006 if (ranging_inst
&& ranging_inst
->range_status
!= NAN_RANGING_IN_PROGRESS
) {
3007 ranging_inst
->range_id
= rng_ind
->rng_id
;
3008 ret
= wl_cfgnan_trigger_ranging(bcmcfg_to_prmry_ndev(cfg
), cfg
,
3009 ranging_inst
, svc
, NAN_RANGE_REQ_EVNT
);
3011 if (unlikely(ret
)) {
3012 WL_ERR(("Failed to trigger range response, ret = (%d)\n", ret
));
3013 memset(ranging_inst
, 0, sizeof(*ranging_inst
));
3017 WL_INFORM(("Ranging for the peer already in progress"));
3025 /* ranging quest and response iovar handler */
3027 wl_cfgnan_trigger_ranging(struct net_device
*ndev
, struct bcm_cfg80211
*cfg
,
3028 void *ranging_ctxt
, nan_svc_info_t
*svc
, uint8 range_cmd
)
3031 bcm_iov_batch_buf_t
*nan_buf
= NULL
;
3032 wl_nan_range_req_t
*range_req
= NULL
;
3033 wl_nan_range_resp_t
*range_resp
= NULL
;
3034 bcm_iov_batch_subcmd_t
*sub_cmd
= NULL
;
3035 uint16 nan_buf_size
= NAN_IOCTL_BUF_SIZE
;
3037 uint8 resp_buf
[NAN_IOCTL_BUF_SIZE_MED
];
3038 dhd_pub_t
*dhdp
= wl_cfg80211_get_dhdp(ndev
);
3039 nan_ranging_inst_t
*ranging_inst
= (nan_ranging_inst_t
*)ranging_ctxt
;
3040 nan_avail_cmd_data cmd_data
;
3044 memset(&cmd_data
, 0, sizeof(cmd_data
));
3045 memcpy(&cmd_data
.peer_nmi
, &ranging_inst
->peer_addr
, ETHER_ADDR_LEN
);
3046 cmd_data
.avail_period
= NAN_RANGING_PERIOD
;
3047 ret
= wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg
),
3048 cfg
, &cmd_data
, WL_AVAIL_LOCAL
);
3049 if (unlikely(ret
)) {
3050 WL_ERR(("Failed to set avail value with type [WL_AVAIL_LOCAL]\n"));
3054 ret
= wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg
),
3055 cfg
, &cmd_data
, WL_AVAIL_RANGING
);
3056 if (unlikely(ret
)) {
3057 WL_ERR(("Failed to set avail value with type [WL_AVAIL_RANGING]\n"));
3061 nan_buf
= MALLOCZ(dhdp
->osh
, nan_buf_size
);
3063 WL_ERR(("%s: memory allocation failed\n", __func__
));
3068 nan_buf
->version
= htol16(WL_NAN_IOV_BATCH_VERSION
);
3070 nan_buf_size
-= OFFSETOF(bcm_iov_batch_buf_t
, cmds
[0]);
3072 sub_cmd
= (bcm_iov_batch_subcmd_t
*)(&nan_buf
->cmds
[0]);
3073 sub_cmd
->u
.options
= htol32(BCM_XTLV_OPTION_ALIGN32
);
3074 if (range_cmd
== NAN_RANGE_REQ_CMD
) {
3075 sub_cmd
->id
= htod16(WL_NAN_CMD_RANGE_REQUEST
);
3076 sub_cmd
->len
= sizeof(sub_cmd
->u
.options
) + sizeof(wl_nan_range_req_t
);
3077 range_req
= (wl_nan_range_req_t
*)(sub_cmd
->data
);
3078 /* ranging config */
3079 range_req
->peer
= ranging_inst
->peer_addr
;
3080 range_req
->interval
= svc
->ranging_interval
;
3081 /* Limits are in cm from host */
3082 range_req
->ingress
= (svc
->ingress_limit
*10);
3083 range_req
->egress
= (svc
->egress_limit
*10);
3084 range_req
->indication
= svc
->ranging_ind
;
3086 /* range response config */
3087 sub_cmd
->id
= htod16(WL_NAN_CMD_RANGE_RESPONSE
);
3088 sub_cmd
->len
= sizeof(sub_cmd
->u
.options
) + sizeof(wl_nan_range_resp_t
);
3089 range_resp
= (wl_nan_range_resp_t
*)(sub_cmd
->data
);
3090 range_resp
->range_id
= ranging_inst
->range_id
;
3091 range_resp
->status
= svc
->status
;
3092 nan_buf
->is_set
= true;
3095 nan_buf_size
-= (sub_cmd
->len
+
3096 OFFSETOF(bcm_iov_batch_subcmd_t
, u
.options
));
3099 memset(resp_buf
, 0, sizeof(resp_buf
));
3100 ret
= wl_cfgnan_execute_ioctl(ndev
, cfg
, nan_buf
, nan_buf_size
,
3102 (void*)resp_buf
, NAN_IOCTL_BUF_SIZE
);
3103 if (unlikely(ret
) || unlikely(status
)) {
3104 WL_ERR(("nan ranging failed ret = %d status = %d\n",
3109 WL_TRACE(("nan ranging trigger successful\n"));
3111 /* check the response buff for request */
3112 if (range_cmd
== NAN_RANGE_REQ_CMD
) {
3113 ret
= process_resp_buf(resp_buf
+ WL_NAN_OBUF_DATA_OFFSET
,
3114 &ranging_inst
->range_id
, WL_NAN_CMD_RANGE_REQUEST
);
3115 WL_TRACE(("\n ranging instance returned %d\n", ranging_inst
->range_id
));
3117 /* Preventing continuous range requests */
3118 ranging_inst
->range_status
= NAN_RANGING_IN_PROGRESS
;
3122 MFREE(dhdp
->osh
, nan_buf
, NAN_IOCTL_BUF_SIZE
);
3128 #endif /* WL_NAN_DISC_CACHE */
3130 static void *wl_nan_bloom_alloc(void *ctx
, uint size
)
3135 buf
= kmalloc(size
, GFP_KERNEL
);
3137 WL_ERR(("%s: memory allocation failed\n", __func__
));
3143 static void wl_nan_bloom_free(void *ctx
, void *buf
, uint size
)
3146 BCM_REFERENCE(size
);
3152 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
3153 #pragma GCC diagnostic ignored "-Wcast-qual"
3155 static uint
wl_nan_hash(void *ctx
, uint index
, const uint8
*input
, uint input_len
)
3157 uint8
* filter_idx
= (uint8
*)ctx
;
3158 uint8 i
= (*filter_idx
* WL_NAN_HASHES_PER_BLOOM
) + (uint8
)index
;
3161 /* Steps 1 and 2 as explained in Section 6.2 */
3162 /* Concatenate index to input and run CRC32 by calling hndcrc32 twice */
3163 b
= hndcrc32(&i
, sizeof(uint8
), CRC32_INIT_VALUE
);
3164 b
= hndcrc32((uint8
*)input
, input_len
, b
);
3165 /* Obtain the last 2 bytes of the CRC32 output */
3166 b
&= NAN_BLOOM_CRC32_MASK
;
3168 /* Step 3 is completed by bcmbloom functions */
3172 static int wl_nan_bloom_create(bcm_bloom_filter_t
**bp
, uint
*idx
, uint size
)
3177 err
= bcm_bloom_create(wl_nan_bloom_alloc
, wl_nan_bloom_free
,
3178 idx
, WL_NAN_HASHES_PER_BLOOM
, size
, bp
);
3179 if (err
!= BCME_OK
) {
3183 /* Populate bloom filter with hash functions */
3184 for (i
= 0; i
< WL_NAN_HASHES_PER_BLOOM
; i
++) {
3185 err
= bcm_bloom_add_hash(*bp
, wl_nan_hash
, &i
);
3187 WL_ERR(("bcm_bloom_add_hash failed\n"));
3196 wl_cfgnan_sd_params_handler(struct net_device
*ndev
,
3197 nan_discover_cmd_data_t
*cmd_data
, uint16 cmd_id
,
3198 void *p_buf
, uint16
*nan_buf_size
)
3201 uint8
*pxtlv
, *srf
= NULL
, *srf_mac
= NULL
, *srftmp
= NULL
;
3202 uint16 buflen_avail
;
3203 bcm_iov_batch_subcmd_t
*sub_cmd
= (bcm_iov_batch_subcmd_t
*)(p_buf
);
3204 wl_nan_sd_params_t
*sd_params
= (wl_nan_sd_params_t
*)sub_cmd
->data
;
3205 uint16 srf_size
= 0;
3207 bcm_bloom_filter_t
*bp
= NULL
;
3208 /* Bloom filter index default, indicates it has not been set */
3209 uint bloom_idx
= 0xFFFFFFFF;
3210 uint16 bloom_len
= NAN_BLOOM_LENGTH_DEFAULT
;
3211 /* srf_ctrl_size = bloom_len + src_control field */
3212 uint16 srf_ctrl_size
= bloom_len
+ 1;
3214 dhd_pub_t
*dhdp
= wl_cfg80211_get_dhdp(ndev
);
3215 struct bcm_cfg80211
*cfg
= wl_get_cfg(ndev
);
3220 if (cmd_data
->period
) {
3221 sd_params
->awake_dw
= cmd_data
->period
;
3223 sd_params
->period
= 1;
3225 if (cmd_data
->ttl
) {
3226 sd_params
->ttl
= cmd_data
->ttl
;
3228 sd_params
->ttl
= WL_NAN_TTL_UNTIL_CANCEL
;
3231 sd_params
->flags
= 0;
3232 sd_params
->flags
= cmd_data
->flags
;
3234 /* Nan Service Based event suppression Flags */
3235 if (cmd_data
->recv_ind_flag
) {
3236 /* BIT0 - If set, host wont rec event "terminated" */
3237 if (CHECK_BIT(cmd_data
->recv_ind_flag
, WL_NAN_EVENT_SUPPRESS_TERMINATE_BIT
)) {
3238 sd_params
->flags
|= WL_NAN_SVC_CTRL_SUPPRESS_EVT_TERMINATED
;
3241 /* BIT1 - If set, host wont receive match expiry evt */
3242 /* TODO: Exp not yet supported */
3243 if (CHECK_BIT(cmd_data
->recv_ind_flag
, WL_NAN_EVENT_SUPPRESS_MATCH_EXP_BIT
)) {
3244 WL_DBG(("Need to add match expiry event\n"));
3246 /* BIT2 - If set, host wont rec event "receive" */
3247 if (CHECK_BIT(cmd_data
->recv_ind_flag
, WL_NAN_EVENT_SUPPRESS_RECEIVE_BIT
)) {
3248 sd_params
->flags
|= WL_NAN_SVC_CTRL_SUPPRESS_EVT_RECEIVE
;
3250 /* BIT3 - If set, host wont rec event "replied" */
3251 if (CHECK_BIT(cmd_data
->recv_ind_flag
, WL_NAN_EVENT_SUPPRESS_REPLIED_BIT
)) {
3252 sd_params
->flags
|= WL_NAN_SVC_CTRL_SUPPRESS_EVT_REPLIED
;
3255 if (cmd_id
== WL_NAN_CMD_SD_PUBLISH
) {
3256 sd_params
->instance_id
= cmd_data
->pub_id
;
3257 if (cmd_data
->service_responder_policy
) {
3258 /* Do not disturb avail if dam is supported */
3259 if (FW_SUPPORTED(dhdp
, autodam
)) {
3260 /* Nan Accept policy: Per service basis policy
3261 * Based on this policy(ALL/NONE), responder side
3262 * will send ACCEPT/REJECT
3263 * If set, auto datapath responder will be sent by FW
3265 sd_params
->flags
|= WL_NAN_SVC_CTRL_AUTO_DPRESP
;
3267 WL_ERR(("svc specifiv auto dp resp is not"
3268 " supported in non-auto dam fw\n"));
3271 } else if (cmd_id
== WL_NAN_CMD_SD_SUBSCRIBE
) {
3272 sd_params
->instance_id
= cmd_data
->sub_id
;
3274 ret
= BCME_USAGE_ERROR
;
3275 WL_ERR(("wrong command id = %d \n", cmd_id
));
3279 if ((cmd_data
->svc_hash
.dlen
== WL_NAN_SVC_HASH_LEN
) &&
3280 (cmd_data
->svc_hash
.data
)) {
3281 memcpy((uint8
*)sd_params
->svc_hash
, cmd_data
->svc_hash
.data
,
3282 cmd_data
->svc_hash
.dlen
);
3284 prhex("hashed svc name", cmd_data
->svc_hash
.data
,
3285 cmd_data
->svc_hash
.dlen
);
3286 #endif /* WL_NAN_DEBUG */
3289 WL_ERR(("invalid svc hash data or length = %d\n",
3290 cmd_data
->svc_hash
.dlen
));
3294 /* check if ranging support is present in firmware */
3295 if ((cmd_data
->sde_control_flag
& NAN_SDE_CF_RANGING_REQUIRED
) &&
3296 !FW_SUPPORTED(dhdp
, nanrange
)) {
3297 WL_ERR(("Service requires ranging but fw doesnt support it\n"));
3298 ret
= BCME_UNSUPPORTED
;
3302 /* Optional parameters: fill the sub_command block with service descriptor attr */
3303 sub_cmd
->id
= htod16(cmd_id
);
3304 sub_cmd
->u
.options
= htol32(BCM_XTLV_OPTION_ALIGN32
);
3305 sub_cmd
->len
= sizeof(sub_cmd
->u
.options
) +
3306 OFFSETOF(wl_nan_sd_params_t
, optional
[0]);
3307 pxtlv
= (uint8
*)&sd_params
->optional
[0];
3309 *nan_buf_size
-= sub_cmd
->len
;
3310 buflen_avail
= *nan_buf_size
;
3312 if (cmd_data
->svc_info
.data
&& cmd_data
->svc_info
.dlen
) {
3313 WL_TRACE(("optional svc_info present, pack it\n"));
3314 ret
= bcm_pack_xtlv_entry(&pxtlv
, nan_buf_size
,
3315 WL_NAN_XTLV_SD_SVC_INFO
,
3316 cmd_data
->svc_info
.dlen
,
3317 cmd_data
->svc_info
.data
, BCM_XTLV_OPTION_ALIGN32
);
3318 if (unlikely(ret
)) {
3319 WL_ERR(("%s: fail to pack WL_NAN_XTLV_SD_SVC_INFO\n", __FUNCTION__
));
3324 if (cmd_data
->sde_svc_info
.data
&& cmd_data
->sde_svc_info
.dlen
) {
3325 WL_TRACE(("optional sdea svc_info present, pack it, %d\n",
3326 cmd_data
->sde_svc_info
.dlen
));
3327 ret
= bcm_pack_xtlv_entry(&pxtlv
, nan_buf_size
,
3328 WL_NAN_XTLV_SD_SDE_SVC_INFO
,
3329 cmd_data
->sde_svc_info
.dlen
,
3330 cmd_data
->sde_svc_info
.data
, BCM_XTLV_OPTION_ALIGN32
);
3331 if (unlikely(ret
)) {
3332 WL_ERR(("%s: fail to pack sdea svc info\n", __FUNCTION__
));
3337 if (cmd_data
->tx_match
.dlen
) {
3338 WL_TRACE(("optional tx match filter presnet (len=%d)\n",
3339 cmd_data
->tx_match
.dlen
));
3340 ret
= bcm_pack_xtlv_entry(&pxtlv
, nan_buf_size
,
3341 WL_NAN_XTLV_CFG_MATCH_TX
, cmd_data
->tx_match
.dlen
,
3342 cmd_data
->tx_match
.data
, BCM_XTLV_OPTION_ALIGN32
);
3343 if (unlikely(ret
)) {
3344 WL_ERR(("%s: failed on xtlv_pack for tx match filter\n", __FUNCTION__
));
3349 if (cmd_data
->life_count
) {
3350 WL_TRACE(("optional life count is present, pack it\n"));
3351 ret
= bcm_pack_xtlv_entry(&pxtlv
, nan_buf_size
, WL_NAN_XTLV_CFG_SVC_LIFE_COUNT
,
3352 sizeof(cmd_data
->life_count
), &cmd_data
->life_count
,
3353 BCM_XTLV_OPTION_ALIGN32
);
3354 if (unlikely(ret
)) {
3355 WL_ERR(("%s: failed to WL_NAN_XTLV_CFG_SVC_LIFE_COUNT\n", __FUNCTION__
));
3360 if (cmd_data
->use_srf
) {
3361 uint8 srf_control
= 0;
3362 /* set include bit */
3363 if (cmd_data
->srf_include
== true) {
3367 if (!ETHER_ISNULLADDR(&cmd_data
->mac_list
.list
) &&
3368 (cmd_data
->mac_list
.num_mac_addr
3369 < NAN_SRF_MAX_MAC
)) {
3370 if (cmd_data
->srf_type
== SRF_TYPE_SEQ_MAC_ADDR
) {
3372 srf_size
= (cmd_data
->mac_list
.num_mac_addr
3373 * ETHER_ADDR_LEN
) + NAN_SRF_CTRL_FIELD_LEN
;
3374 WL_TRACE(("srf size = %d\n", srf_size
));
3376 srf_mac
= MALLOCZ(dhdp
->osh
, srf_size
);
3377 if (srf_mac
== NULL
) {
3378 WL_ERR(("%s: memory allocation failed\n", __FUNCTION__
));
3382 memcpy(srf_mac
, &srf_control
, NAN_SRF_CTRL_FIELD_LEN
);
3383 memcpy(srf_mac
+1, cmd_data
->mac_list
.list
,
3384 (srf_size
- NAN_SRF_CTRL_FIELD_LEN
));
3385 ret
= bcm_pack_xtlv_entry(&pxtlv
, nan_buf_size
,
3386 WL_NAN_XTLV_CFG_SR_FILTER
, srf_size
, srf_mac
,
3387 BCM_XTLV_OPTION_ALIGN32
);
3388 if (unlikely(ret
)) {
3389 WL_ERR(("%s: failed to WL_NAN_XTLV_CFG_SR_FILTER\n",
3393 } else if (cmd_data
->srf_type
== SRF_TYPE_BLOOM_FILTER
) {
3394 /* Create bloom filter */
3395 srf
= MALLOCZ(dhdp
->osh
, srf_ctrl_size
);
3397 WL_ERR(("%s: memory allocation failed\n", __FUNCTION__
));
3403 /* Instance id must be from 0 to 254, 255 is vendor specific */
3404 if (sd_params
->instance_id
<= NAN_ID_MIN
||
3405 sd_params
->instance_id
> (NAN_ID_MAX
- 1)) {
3406 WL_ERR(("Invalid instance id\n"));
3410 if (bloom_idx
== 0xFFFFFFFF) {
3411 bloom_idx
= sd_params
->instance_id
% 4;
3413 WL_ERR(("Invalid bloom_idx\n"));
3418 srf_control
|= bloom_idx
<< 2;
3420 ret
= wl_nan_bloom_create(&bp
, &bloom_idx
, bloom_len
);
3421 if (unlikely(ret
)) {
3422 WL_ERR(("%s: Bloom create failed\n", __FUNCTION__
));
3426 srftmp
= cmd_data
->mac_list
.list
;
3428 a
< cmd_data
->mac_list
.num_mac_addr
; a
++) {
3429 ret
= bcm_bloom_add_member(bp
, srftmp
, ETHER_ADDR_LEN
);
3430 if (unlikely(ret
)) {
3431 WL_ERR(("%s: Cannot add to bloom filter\n",
3435 srftmp
+= ETHER_ADDR_LEN
;
3438 memcpy(srf
, &srf_control
, NAN_SRF_CTRL_FIELD_LEN
);
3439 ret
= bcm_bloom_get_filter_data(bp
, bloom_len
,
3440 (srf
+ NAN_SRF_CTRL_FIELD_LEN
),
3442 if (unlikely(ret
)) {
3443 WL_ERR(("%s: Cannot get filter data\n", __FUNCTION__
));
3446 ret
= bcm_pack_xtlv_entry(&pxtlv
, nan_buf_size
,
3447 WL_NAN_XTLV_CFG_SR_FILTER
, srf_ctrl_size
,
3448 srf
, BCM_XTLV_OPTION_ALIGN32
);
3449 if (ret
!= BCME_OK
) {
3453 WL_ERR(("Invalid SRF Type = %d !!!\n",
3454 cmd_data
->srf_type
));
3458 WL_ERR(("Invalid MAC Addr/Too many mac addr = %d !!!\n",
3459 cmd_data
->mac_list
.num_mac_addr
));
3464 if (cmd_data
->rx_match
.dlen
) {
3465 WL_TRACE(("optional rx match filter is present, pack it\n"));
3466 ret
= bcm_pack_xtlv_entry(&pxtlv
, nan_buf_size
,
3467 WL_NAN_XTLV_CFG_MATCH_RX
, cmd_data
->rx_match
.dlen
,
3468 cmd_data
->rx_match
.data
, BCM_XTLV_OPTION_ALIGN32
);
3469 if (unlikely(ret
)) {
3470 WL_ERR(("%s: failed on xtlv_pack for rx match filter\n", __func__
));
3475 /* Security elements */
3476 if (cmd_data
->csid
) {
3477 WL_TRACE(("Cipher suite type is present, pack it\n"));
3478 ret
= bcm_pack_xtlv_entry(&pxtlv
, nan_buf_size
,
3479 WL_NAN_XTLV_CFG_SEC_CSID
, sizeof(nan_sec_csid_e
),
3480 (uint8
*)&cmd_data
->csid
, BCM_XTLV_OPTION_ALIGN32
);
3481 if (unlikely(ret
)) {
3482 WL_ERR(("%s: fail to pack on csid\n", __FUNCTION__
));
3487 if (cmd_data
->ndp_cfg
.security_cfg
) {
3488 if ((cmd_data
->key_type
== NAN_SECURITY_KEY_INPUT_PMK
) ||
3489 (cmd_data
->key_type
== NAN_SECURITY_KEY_INPUT_PASSPHRASE
)) {
3490 if (cmd_data
->key
.data
&& cmd_data
->key
.dlen
) {
3491 WL_TRACE(("optional pmk present, pack it\n"));
3492 ret
= bcm_pack_xtlv_entry(&pxtlv
, nan_buf_size
,
3493 WL_NAN_XTLV_CFG_SEC_PMK
, cmd_data
->key
.dlen
,
3494 cmd_data
->key
.data
, BCM_XTLV_OPTION_ALIGN32
);
3495 if (unlikely(ret
)) {
3496 WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SEC_PMK\n",
3502 WL_ERR(("Invalid security key type\n"));
3508 if (cmd_data
->scid
.data
&& cmd_data
->scid
.dlen
) {
3509 WL_TRACE(("optional scid present, pack it\n"));
3510 ret
= bcm_pack_xtlv_entry(&pxtlv
, nan_buf_size
, WL_NAN_XTLV_CFG_SEC_SCID
,
3511 cmd_data
->scid
.dlen
, cmd_data
->scid
.data
, BCM_XTLV_OPTION_ALIGN32
);
3512 if (unlikely(ret
)) {
3513 WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SEC_SCID\n", __FUNCTION__
));
3518 if (cmd_data
->sde_control_flag
) {
3519 ret
= bcm_pack_xtlv_entry(&pxtlv
, nan_buf_size
,
3520 WL_NAN_XTLV_SD_SDE_CONTROL
,
3521 sizeof(uint16
), (uint8
*)&cmd_data
->sde_control_flag
,
3522 BCM_XTLV_OPTION_ALIGN32
);
3523 if (ret
!= BCME_OK
) {
3524 WL_ERR(("%s: fail to pack WL_NAN_XTLV_SD_SDE_CONTROL\n", __FUNCTION__
));
3529 sub_cmd
->len
+= (buflen_avail
- *nan_buf_size
);
3533 MFREE(dhdp
->osh
, srf
, srf_ctrl_size
);
3537 MFREE(dhdp
->osh
, srf_mac
, srf_size
);
/*
 * Accumulate into *data_size the 32-bit-aligned space needed for the optional
 * discovery XTLVs (svc_info, sde_svc_info, tx/rx match filters, SRF, key,
 * scid, sde_control_flag, life_count) that wl_cfgnan_sd_params_handler will
 * later pack for this nan_discover_cmd_data_t.
 * NOTE(review): the extraction dropped this function's opening brace, local
 * declarations and return path (original lines 3545-3546, 3561, 3563-3564,
 * 3574+); the text below preserves the surviving fragments verbatim.
 */
3544 wl_cfgnan_aligned_data_size_of_opt_disc_params(uint16
*data_size
, nan_discover_cmd_data_t
*cmd_data
)
/* Each optional element costs its payload plus the XTLV id/len header,
 * rounded up to a 4-byte boundary (ALIGN_SIZE(..., 4)). */
3547 if (cmd_data
->svc_info
.dlen
)
3548 *data_size
+= ALIGN_SIZE(cmd_data
->svc_info
.dlen
+ NAN_XTLV_ID_LEN_SIZE
, 4);
3549 if (cmd_data
->sde_svc_info
.dlen
)
3550 *data_size
+= ALIGN_SIZE(cmd_data
->sde_svc_info
.dlen
+ NAN_XTLV_ID_LEN_SIZE
, 4);
3551 if (cmd_data
->tx_match
.dlen
)
3552 *data_size
+= ALIGN_SIZE(cmd_data
->tx_match
.dlen
+ NAN_XTLV_ID_LEN_SIZE
, 4);
3553 if (cmd_data
->rx_match
.dlen
)
3554 *data_size
+= ALIGN_SIZE(cmd_data
->rx_match
.dlen
+ NAN_XTLV_ID_LEN_SIZE
, 4);
/* Service Response Filter: sized either as a sequential MAC-address list
 * plus the SRF control byte, or as a default-length bloom filter. */
3555 if (cmd_data
->use_srf
) {
3556 if (cmd_data
->srf_type
== SRF_TYPE_SEQ_MAC_ADDR
) {
3557 *data_size
+= (cmd_data
->mac_list
.num_mac_addr
* ETHER_ADDR_LEN
)
3558 + NAN_SRF_CTRL_FIELD_LEN
;
3559 } else { /* Bloom filter type */
3560 *data_size
+= NAN_BLOOM_LENGTH_DEFAULT
+ 1;
/* NOTE(review): this re-aligns the running total itself plus one XTLV
 * header; an original line (3561, presumably the closing brace of the
 * else branch) is missing here - confirm against the full source. */
3562 *data_size
+= ALIGN_SIZE(*data_size
+ NAN_XTLV_ID_LEN_SIZE
, 4);
/* Security elements: csid slot is reserved here; original line 3564
 * (likely the guarding condition) was dropped by the extraction. */
3565 *data_size
+= ALIGN_SIZE(sizeof(nan_sec_csid_e
) + NAN_XTLV_ID_LEN_SIZE
, 4);
3566 if (cmd_data
->key
.dlen
)
3567 *data_size
+= ALIGN_SIZE(cmd_data
->key
.dlen
+ NAN_XTLV_ID_LEN_SIZE
, 4);
3568 if (cmd_data
->scid
.dlen
)
3569 *data_size
+= ALIGN_SIZE(cmd_data
->scid
.dlen
+ NAN_XTLV_ID_LEN_SIZE
, 4);
3570 if (cmd_data
->sde_control_flag
)
3571 *data_size
+= ALIGN_SIZE(sizeof(uint16
) + NAN_XTLV_ID_LEN_SIZE
, 4);
3572 if (cmd_data
->life_count
)
3573 *data_size
+= ALIGN_SIZE(sizeof(cmd_data
->life_count
) + NAN_XTLV_ID_LEN_SIZE
, 4);
/*
 * Accumulate into *data_size the 32-bit-aligned space needed for the optional
 * datapath XTLVs (svc_info, security key, csid, service hash) used by the
 * NDP request/response handlers for this nan_datapath_cmd_data_t.
 * NOTE(review): the extraction dropped the opening brace, local declarations
 * and return path (original lines 3579-3580, 3585, 3587, 3589+); the
 * fragments below are preserved verbatim.
 */
3578 wl_cfgnan_aligned_data_size_of_opt_dp_params(uint16
*data_size
, nan_datapath_cmd_data_t
*cmd_data
)
/* Optional payloads cost their length plus the XTLV id/len header,
 * rounded up to a 4-byte boundary. */
3581 if (cmd_data
->svc_info
.dlen
)
3582 *data_size
+= ALIGN_SIZE(cmd_data
->svc_info
.dlen
+ NAN_XTLV_ID_LEN_SIZE
, 4);
3583 if (cmd_data
->key
.dlen
)
3584 *data_size
+= ALIGN_SIZE(cmd_data
->key
.dlen
+ NAN_XTLV_ID_LEN_SIZE
, 4);
/* csid and svc-hash slots; their guarding conditions (original lines
 * 3585 and 3587) were dropped by the extraction - confirm against the
 * full source before relying on them being unconditional. */
3586 *data_size
+= ALIGN_SIZE(sizeof(nan_sec_csid_e
) + NAN_XTLV_ID_LEN_SIZE
, 4);
3588 *data_size
+= ALIGN_SIZE(WL_NAN_SVC_HASH_LEN
+ NAN_XTLV_ID_LEN_SIZE
, 4);
3592 wl_cfgnan_svc_get_handler(struct net_device
*ndev
,
3593 struct bcm_cfg80211
*cfg
, uint16 cmd_id
, nan_discover_cmd_data_t
*cmd_data
)
3595 bcm_iov_batch_subcmd_t
*sub_cmd
= NULL
;
3598 bcm_iov_batch_buf_t
*nan_buf
= NULL
;
3600 uint8
*resp_buf
= NULL
;
3601 uint16 data_size
= WL_NAN_OBUF_DATA_OFFSET
+ sizeof(instance_id
);
3605 nan_buf
= MALLOCZ(cfg
->osh
, data_size
);
3607 WL_ERR(("%s: memory allocation failed\n", __func__
));
3612 resp_buf
= MALLOCZ(cfg
->osh
, NAN_IOCTL_BUF_SIZE_LARGE
);
3614 WL_ERR(("%s: memory allocation failed\n", __func__
));
3618 nan_buf
->version
= htol16(WL_NAN_IOV_BATCH_VERSION
);
3620 /* check if service is present */
3621 nan_buf
->is_set
= false;
3622 sub_cmd
= (bcm_iov_batch_subcmd_t
*)(&nan_buf
->cmds
[0]);
3623 if (cmd_id
== WL_NAN_CMD_SD_PUBLISH
) {
3624 instance_id
= cmd_data
->pub_id
;
3625 } else if (cmd_id
== WL_NAN_CMD_SD_SUBSCRIBE
) {
3626 instance_id
= cmd_data
->sub_id
;
3628 ret
= BCME_USAGE_ERROR
;
3629 WL_ERR(("wrong command id = %u\n", cmd_id
));
3632 /* Fill the sub_command block */
3633 sub_cmd
->id
= htod16(cmd_id
);
3634 sub_cmd
->len
= sizeof(sub_cmd
->u
.options
) + sizeof(instance_id
);
3635 sub_cmd
->u
.options
= htol32(BCM_XTLV_OPTION_ALIGN32
);
3636 memcpy(sub_cmd
->data
, &instance_id
, sizeof(instance_id
));
3637 ret
= wl_cfgnan_execute_ioctl(ndev
, cfg
, nan_buf
, data_size
,
3638 &(cmd_data
->status
), resp_buf
, NAN_IOCTL_BUF_SIZE_LARGE
);
3640 if (unlikely(ret
) || unlikely(cmd_data
->status
)) {
3641 WL_ERR(("nan svc check failed ret = %d status = %d\n", ret
, cmd_data
->status
));
3644 WL_DBG(("nan svc check successful..proceed to update\n"));
3649 MFREE(cfg
->osh
, nan_buf
, data_size
);
3653 MFREE(cfg
->osh
, resp_buf
, NAN_IOCTL_BUF_SIZE_LARGE
);
3661 wl_cfgnan_svc_handler(struct net_device
*ndev
,
3662 struct bcm_cfg80211
*cfg
, uint16 cmd_id
, nan_discover_cmd_data_t
*cmd_data
)
3665 bcm_iov_batch_buf_t
*nan_buf
= NULL
;
3666 uint16 nan_buf_size
;
3667 uint8
*resp_buf
= NULL
;
3668 /* Considering fixed params */
3669 uint16 data_size
= WL_NAN_OBUF_DATA_OFFSET
+
3670 OFFSETOF(wl_nan_sd_params_t
, optional
[0]);
3672 if (cmd_data
->svc_update
) {
3673 ret
= wl_cfgnan_svc_get_handler(ndev
, cfg
, cmd_id
, cmd_data
);
3674 if (ret
!= BCME_OK
) {
3677 /* Ignoring any other svc get error */
3678 if (cmd_data
->status
== WL_NAN_E_BAD_INSTANCE
) {
3684 ret
= wl_cfgnan_aligned_data_size_of_opt_disc_params(&data_size
, cmd_data
);
3685 if (unlikely(ret
)) {
3686 WL_ERR(("Failed to get alligned size of optional params\n"));
3689 nan_buf_size
= data_size
;
3692 nan_buf
= MALLOCZ(cfg
->osh
, data_size
);
3694 WL_ERR(("%s: memory allocation failed\n", __func__
));
3699 resp_buf
= MALLOCZ(cfg
->osh
, data_size
+ NAN_IOVAR_NAME_SIZE
);
3701 WL_ERR(("%s: memory allocation failed\n", __func__
));
3705 nan_buf
->version
= htol16(WL_NAN_IOV_BATCH_VERSION
);
3707 nan_buf
->is_set
= true;
3709 ret
= wl_cfgnan_sd_params_handler(ndev
, cmd_data
, cmd_id
,
3710 &nan_buf
->cmds
[0], &nan_buf_size
);
3711 if (unlikely(ret
)) {
3712 WL_ERR((" Service discovery params handler failed, ret = %d\n", ret
));
3717 ret
= wl_cfgnan_execute_ioctl(ndev
, cfg
, nan_buf
, data_size
,
3718 &(cmd_data
->status
), resp_buf
, data_size
+ NAN_IOVAR_NAME_SIZE
);
3719 if (cmd_data
->svc_update
&& (cmd_data
->status
== BCME_DATA_NOTFOUND
)) {
3720 /* return OK if update tlv data is not present
3721 * which means nothing to update
3723 cmd_data
->status
= BCME_OK
;
3725 if (unlikely(ret
) || unlikely(cmd_data
->status
)) {
3726 WL_ERR(("nan svc failed ret = %d status = %d\n", ret
, cmd_data
->status
));
3729 WL_DBG(("nan svc successful\n"));
3730 #ifdef WL_NAN_DISC_CACHE
3731 if (!cmd_data
->svc_update
) { /* cache new service */
3732 ret
= wl_cfgnan_cache_svc_info(cfg
, cmd_data
, cmd_id
);
3734 WL_ERR(("%s: fail to cache svc info, ret=%d\n",
3735 __FUNCTION__
, ret
));
3739 WL_DBG(("skipping caching for update of svc %d\n", cmd_id
));
3741 #endif /* WL_NAN_DISC_CACHE */
3746 MFREE(cfg
->osh
, nan_buf
, data_size
);
3750 MFREE(cfg
->osh
, resp_buf
, data_size
+ NAN_IOVAR_NAME_SIZE
);
3757 wl_cfgnan_publish_handler(struct net_device
*ndev
,
3758 struct bcm_cfg80211
*cfg
, nan_discover_cmd_data_t
*cmd_data
)
3765 * proceed only if mandatory arguments are present - subscriber id,
3768 if ((!cmd_data
->pub_id
) || (!cmd_data
->svc_hash
.data
) ||
3769 (!cmd_data
->svc_hash
.dlen
)) {
3770 WL_ERR(("mandatory arguments are not present\n"));
3775 ret
= wl_cfgnan_svc_handler(ndev
, cfg
, WL_NAN_CMD_SD_PUBLISH
, cmd_data
);
3777 WL_ERR(("%s: fail to handle pub, ret=%d\n", __FUNCTION__
, ret
));
3780 WL_INFORM_MEM(("[NAN] Service published for instance id:%d\n", cmd_data
->pub_id
));
3789 wl_cfgnan_subscribe_handler(struct net_device
*ndev
,
3790 struct bcm_cfg80211
*cfg
, nan_discover_cmd_data_t
*cmd_data
)
3797 * proceed only if mandatory arguments are present - subscriber id,
3800 if ((!cmd_data
->sub_id
) || (!cmd_data
->svc_hash
.data
) ||
3801 (!cmd_data
->svc_hash
.dlen
)) {
3802 WL_ERR(("mandatory arguments are not present\n"));
3807 ret
= wl_cfgnan_svc_handler(ndev
, cfg
, WL_NAN_CMD_SD_SUBSCRIBE
, cmd_data
);
3809 WL_ERR(("%s: fail to handle svc, ret=%d\n", __FUNCTION__
, ret
));
3812 WL_INFORM_MEM(("[NAN] Service subscribed for instance id:%d\n", cmd_data
->sub_id
));
3821 wl_cfgnan_cancel_handler(nan_discover_cmd_data_t
*cmd_data
,
3822 uint16 cmd_id
, void *p_buf
, uint16
*nan_buf_size
)
3828 if (p_buf
!= NULL
) {
3829 bcm_iov_batch_subcmd_t
*sub_cmd
= (bcm_iov_batch_subcmd_t
*)(p_buf
);
3830 wl_nan_instance_id_t instance_id
;
3832 if (cmd_id
== WL_NAN_CMD_SD_CANCEL_PUBLISH
) {
3833 instance_id
= cmd_data
->pub_id
;
3834 } else if (cmd_id
== WL_NAN_CMD_SD_CANCEL_SUBSCRIBE
) {
3835 instance_id
= cmd_data
->sub_id
;
3837 ret
= BCME_USAGE_ERROR
;
3838 WL_ERR(("wrong command id = %u\n", cmd_id
));
3842 /* Fill the sub_command block */
3843 sub_cmd
->id
= htod16(cmd_id
);
3844 sub_cmd
->len
= sizeof(sub_cmd
->u
.options
) + sizeof(instance_id
);
3845 sub_cmd
->u
.options
= htol32(BCM_XTLV_OPTION_ALIGN32
);
3846 memcpy(sub_cmd
->data
, &instance_id
, sizeof(instance_id
));
3847 /* adjust iov data len to the end of last data record */
3848 *nan_buf_size
-= (sub_cmd
->len
+
3849 OFFSETOF(bcm_iov_batch_subcmd_t
, u
.options
));
3850 WL_INFORM_MEM(("[NAN] Service with instance id:%d cancelled\n", instance_id
));
3852 WL_ERR(("nan_iov_buf is NULL\n"));
3863 wl_cfgnan_cancel_pub_handler(struct net_device
*ndev
,
3864 struct bcm_cfg80211
*cfg
, nan_discover_cmd_data_t
*cmd_data
)
3866 bcm_iov_batch_buf_t
*nan_buf
= NULL
;
3868 uint16 nan_buf_size
= NAN_IOCTL_BUF_SIZE
;
3869 uint8 resp_buf
[NAN_IOCTL_BUF_SIZE
];
3870 dhd_pub_t
*dhdp
= wl_cfg80211_get_dhdp(ndev
);
3875 nan_buf
= MALLOCZ(dhdp
->osh
, nan_buf_size
);
3877 WL_ERR(("%s: memory allocation failed\n", __func__
));
3882 nan_buf
->version
= htol16(WL_NAN_IOV_BATCH_VERSION
);
3884 nan_buf_size
-= OFFSETOF(bcm_iov_batch_buf_t
, cmds
[0]);
3886 /* proceed only if mandatory argument is present - publisher id */
3887 if (!cmd_data
->pub_id
) {
3888 WL_ERR(("mandatory argument is not present\n"));
3893 #ifdef WL_NAN_DISC_CACHE
3894 /* terminate ranging sessions for this svc */
3895 wl_cfgnan_terminate_ranging_sessions(ndev
, cfg
, cmd_data
->pub_id
);
3896 #endif /* WL_NAN_DISC_CACHE */
3897 ret
= wl_cfgnan_cancel_handler(cmd_data
, WL_NAN_CMD_SD_CANCEL_PUBLISH
,
3898 &nan_buf
->cmds
[0], &nan_buf_size
);
3899 if (unlikely(ret
)) {
3900 WL_ERR(("cancel publish failed\n"));
3903 nan_buf
->is_set
= true;
3906 memset(resp_buf
, 0, sizeof(resp_buf
));
3907 ret
= wl_cfgnan_execute_ioctl(ndev
, cfg
, nan_buf
, nan_buf_size
,
3908 &(cmd_data
->status
),
3909 (void*)resp_buf
, NAN_IOCTL_BUF_SIZE
);
3910 if (unlikely(ret
) || unlikely(cmd_data
->status
)) {
3911 WL_ERR(("nan cancel publish failed ret = %d status = %d\n",
3912 ret
, cmd_data
->status
));
3915 WL_DBG(("nan cancel publish successfull\n"));
3918 MFREE(dhdp
->osh
, nan_buf
, NAN_IOCTL_BUF_SIZE
);
3927 wl_cfgnan_cancel_sub_handler(struct net_device
*ndev
,
3928 struct bcm_cfg80211
*cfg
, nan_discover_cmd_data_t
*cmd_data
)
3930 bcm_iov_batch_buf_t
*nan_buf
= NULL
;
3932 uint16 nan_buf_size
= NAN_IOCTL_BUF_SIZE
;
3933 uint8 resp_buf
[NAN_IOCTL_BUF_SIZE
];
3934 dhd_pub_t
*dhdp
= wl_cfg80211_get_dhdp(ndev
);
3939 nan_buf
= MALLOCZ(dhdp
->osh
, nan_buf_size
);
3941 WL_ERR(("%s: memory allocation failed\n", __func__
));
3946 nan_buf
->version
= htol16(WL_NAN_IOV_BATCH_VERSION
);
3948 nan_buf_size
-= OFFSETOF(bcm_iov_batch_buf_t
, cmds
[0]);
3950 /* proceed only if mandatory argument is present - subscriber id */
3951 if (!cmd_data
->sub_id
) {
3952 WL_ERR(("mandatory argument is not present\n"));
3957 #ifdef WL_NAN_DISC_CACHE
3958 /* terminate ranging sessions for this svc */
3959 wl_cfgnan_terminate_ranging_sessions(ndev
, cfg
, cmd_data
->sub_id
);
3960 wl_cfgnan_remove_disc_result(cfg
, cmd_data
->sub_id
);
3961 #endif /* WL_NAN_DISC_CACHE */
3963 ret
= wl_cfgnan_cancel_handler(cmd_data
, WL_NAN_CMD_SD_CANCEL_SUBSCRIBE
,
3964 &nan_buf
->cmds
[0], &nan_buf_size
);
3965 if (unlikely(ret
)) {
3966 WL_ERR(("cancel subscribe failed\n"));
3969 nan_buf
->is_set
= true;
3972 memset(resp_buf
, 0, sizeof(resp_buf
));
3973 ret
= wl_cfgnan_execute_ioctl(ndev
, cfg
, nan_buf
, nan_buf_size
,
3974 &(cmd_data
->status
),
3975 (void*)resp_buf
, NAN_IOCTL_BUF_SIZE
);
3976 if (unlikely(ret
) || unlikely(cmd_data
->status
)) {
3977 WL_ERR(("nan cancel subscribe failed ret = %d status = %d\n",
3978 ret
, cmd_data
->status
));
3981 WL_DBG(("subscribe cancel successfull\n"));
3984 MFREE(dhdp
->osh
, nan_buf
, NAN_IOCTL_BUF_SIZE
);
3993 wl_cfgnan_transmit_handler(struct net_device
*ndev
,
3994 struct bcm_cfg80211
*cfg
, nan_discover_cmd_data_t
*cmd_data
)
3997 bcm_iov_batch_buf_t
*nan_buf
= NULL
;
3998 wl_nan_sd_transmit_t
*sd_xmit
= NULL
;
3999 bcm_iov_batch_subcmd_t
*sub_cmd
= NULL
;
4000 bool is_lcl_id
= FALSE
;
4001 bool is_dest_id
= FALSE
;
4002 bool is_dest_mac
= FALSE
;
4003 uint16 buflen_avail
;
4005 uint16 nan_buf_size
;
4006 uint8
*resp_buf
= NULL
;
4007 /* Considering fixed params */
4008 uint16 data_size
= WL_NAN_OBUF_DATA_OFFSET
+
4009 OFFSETOF(wl_nan_sd_transmit_t
, opt_tlv
);
4010 data_size
= ALIGN_SIZE(data_size
, 4);
4011 ret
= wl_cfgnan_aligned_data_size_of_opt_disc_params(&data_size
, cmd_data
);
4012 if (unlikely(ret
)) {
4013 WL_ERR(("Failed to get alligned size of optional params\n"));
4018 nan_buf_size
= data_size
;
4019 nan_buf
= MALLOCZ(cfg
->osh
, data_size
);
4021 WL_ERR(("%s: memory allocation failed\n", __func__
));
4026 resp_buf
= MALLOCZ(cfg
->osh
, data_size
+ NAN_IOVAR_NAME_SIZE
);
4028 WL_ERR(("%s: memory allocation failed\n", __func__
));
4034 nan_buf
->version
= htol16(WL_NAN_IOV_BATCH_VERSION
);
4036 nan_buf_size
-= OFFSETOF(bcm_iov_batch_buf_t
, cmds
[0]);
4038 * proceed only if mandatory arguments are present - subscriber id,
4039 * publisher id, mac address
4041 if ((!cmd_data
->local_id
) || (!cmd_data
->remote_id
) ||
4042 ETHER_ISNULLADDR(&cmd_data
->mac_addr
.octet
)) {
4043 WL_ERR(("mandatory arguments are not present\n"));
4048 sub_cmd
= (bcm_iov_batch_subcmd_t
*)(&nan_buf
->cmds
[0]);
4049 sd_xmit
= (wl_nan_sd_transmit_t
*)(sub_cmd
->data
);
4051 /* local instance id must be from 0 to 254, 255 is vendor specific */
4052 if (cmd_data
->local_id
<= NAN_ID_MIN
||
4053 cmd_data
->local_id
> (NAN_ID_MAX
- 1)) {
4054 WL_ERR(("Invalid local instance id\n"));
4058 sd_xmit
->local_service_id
= cmd_data
->local_id
;
4061 /* remote instance id must be from 0 to 254, 255 is vendor specific */
4062 if (cmd_data
->remote_id
<= NAN_ID_MIN
||
4063 cmd_data
->remote_id
> (NAN_ID_MAX
- 1)) {
4064 WL_ERR(("Invalid remote instance id\n"));
4069 sd_xmit
->requestor_service_id
= cmd_data
->remote_id
;
4072 if (!ETHER_ISNULLADDR(&cmd_data
->mac_addr
.octet
)) {
4073 memcpy(&sd_xmit
->destination_addr
, &cmd_data
->mac_addr
, ETHER_ADDR_LEN
);
4075 WL_ERR(("Invalid ether addr provided\n"));
4081 if (cmd_data
->priority
) {
4082 sd_xmit
->priority
= cmd_data
->priority
;
4084 sd_xmit
->token
= cmd_data
->token
;
4086 if (cmd_data
->recv_ind_flag
) {
4087 /* BIT0 - If set, host wont rec event "txs" */
4088 if (CHECK_BIT(cmd_data
->recv_ind_flag
,
4089 WL_NAN_EVENT_SUPPRESS_FOLLOWUP_RECEIVE_BIT
)) {
4090 sd_xmit
->flags
= WL_NAN_FUP_SUPR_EVT_TXS
;
4093 /* Optional parameters: fill the sub_command block with service descriptor attr */
4094 sub_cmd
->id
= htod16(WL_NAN_CMD_SD_TRANSMIT
);
4095 sub_cmd
->len
= sizeof(sub_cmd
->u
.options
) +
4096 OFFSETOF(wl_nan_sd_transmit_t
, opt_tlv
);
4097 sub_cmd
->u
.options
= htol32(BCM_XTLV_OPTION_ALIGN32
);
4098 pxtlv
= (uint8
*)&sd_xmit
->opt_tlv
;
4100 nan_buf_size
-= (sub_cmd
->len
+
4101 OFFSETOF(bcm_iov_batch_subcmd_t
, u
.options
));
4103 buflen_avail
= nan_buf_size
;
4105 if (cmd_data
->svc_info
.data
&& cmd_data
->svc_info
.dlen
) {
4106 bcm_xtlv_t
*pxtlv_svc_info
= (bcm_xtlv_t
*)pxtlv
;
4107 ret
= bcm_pack_xtlv_entry(&pxtlv
, &nan_buf_size
,
4108 WL_NAN_XTLV_SD_SVC_INFO
, cmd_data
->svc_info
.dlen
,
4109 cmd_data
->svc_info
.data
, BCM_XTLV_OPTION_ALIGN32
);
4110 if (unlikely(ret
)) {
4111 WL_ERR(("%s: fail to pack on bcm_pack_xtlv_entry, ret=%d\n",
4112 __FUNCTION__
, ret
));
4116 /* 0xFF is max length for svc_info */
4117 if (pxtlv_svc_info
->len
> 0xFF) {
4118 WL_ERR(("Invalid service info length %d\n",
4119 (pxtlv_svc_info
->len
)));
4120 ret
= BCME_USAGE_ERROR
;
4123 sd_xmit
->opt_len
= (uint8
)(pxtlv_svc_info
->len
);
4125 if (cmd_data
->sde_svc_info
.data
&& cmd_data
->sde_svc_info
.dlen
) {
4126 WL_TRACE(("optional sdea svc_info present, pack it\n"));
4127 ret
= bcm_pack_xtlv_entry(&pxtlv
, &nan_buf_size
,
4128 WL_NAN_XTLV_SD_SDE_SVC_INFO
, cmd_data
->sde_svc_info
.dlen
,
4129 cmd_data
->sde_svc_info
.data
, BCM_XTLV_OPTION_ALIGN32
);
4130 if (unlikely(ret
)) {
4131 WL_ERR(("%s: fail to pack sdea svc info\n", __FUNCTION__
));
4136 /* Check if all mandatory params are provided */
4137 if (is_lcl_id
&& is_dest_id
&& is_dest_mac
) {
4139 sub_cmd
->len
+= (buflen_avail
- nan_buf_size
);
4141 WL_ERR(("Missing parameters\n"));
4142 ret
= BCME_USAGE_ERROR
;
4144 nan_buf
->is_set
= TRUE
;
4145 ret
= wl_cfgnan_execute_ioctl(ndev
, cfg
, nan_buf
, data_size
,
4146 &(cmd_data
->status
), resp_buf
, data_size
+ NAN_IOVAR_NAME_SIZE
);
4147 if (unlikely(ret
) || unlikely(cmd_data
->status
)) {
4148 WL_ERR(("nan transmit failed for token %d ret = %d status = %d\n",
4149 sd_xmit
->token
, ret
, cmd_data
->status
));
4152 WL_MEM(("nan transmit successful for token %d\n", sd_xmit
->token
));
4155 MFREE(cfg
->osh
, nan_buf
, data_size
);
4158 MFREE(cfg
->osh
, resp_buf
, data_size
+ NAN_IOVAR_NAME_SIZE
);
4166 wl_cfgnan_get_capablities_handler(struct net_device
*ndev
,
4167 struct bcm_cfg80211
*cfg
, nan_hal_capabilities_t
*capabilities
)
4173 /* Populate get capability */
4174 capabilities
->max_concurrent_nan_clusters
= MAX_CONCURRENT_NAN_CLUSTERS
;
4175 capabilities
->max_publishes
= MAX_PUBLISHES
;
4176 capabilities
->max_subscribes
= MAX_SUBSCRIBES
;
4177 capabilities
->max_service_name_len
= MAX_SVC_NAME_LEN
;
4178 capabilities
->max_match_filter_len
= MAX_MATCH_FILTER_LEN
;
4179 capabilities
->max_total_match_filter_len
= MAX_TOTAL_MATCH_FILTER_LEN
;
4180 capabilities
->max_service_specific_info_len
= NAN_MAX_SERVICE_SPECIFIC_INFO_LEN
;
4181 capabilities
->max_ndi_interfaces
= MAX_NDI_INTERFACES
;
4182 capabilities
->max_ndp_sessions
= MAX_NDP_SESSIONS
;
4183 capabilities
->max_app_info_len
= MAX_APP_INFO_LEN
;
4184 capabilities
->max_queued_transmit_followup_msgs
= MAX_QUEUED_TX_FOLLOUP_MSGS
;
4185 capabilities
->max_sdea_service_specific_info_len
= MAX_SDEA_SVC_INFO_LEN
;
4186 capabilities
->max_subscribe_address
= MAX_SUBSCRIBE_ADDRESS
;
4187 capabilities
->cipher_suites_supported
= CIPHER_SUITE_SUPPORTED
;
4188 capabilities
->max_scid_len
= MAX_SCID_LEN
;
4189 capabilities
->is_ndp_security_supported
= true;
4190 capabilities
->ndp_supported_bands
= NDP_SUPPORTED_BANDS
;
/*
 * Report whether NAN is currently enabled for this cfg80211 context.
 * Simple accessor over cfg->nan_enable; no side effects.
 * (Original lines 4197/4199 - the function braces - were dropped by the
 * extraction; logic preserved verbatim.)
 */
4196 bool wl_cfgnan_check_state(struct bcm_cfg80211
*cfg
)
4198 return cfg
->nan_enable
;
4202 wl_cfgnan_init(struct bcm_cfg80211
*cfg
)
4205 uint16 nan_buf_size
= NAN_IOCTL_BUF_SIZE
;
4207 uint8 resp_buf
[NAN_IOCTL_BUF_SIZE
];
4208 uint8 buf
[NAN_IOCTL_BUF_SIZE
];
4209 bcm_iov_batch_buf_t
*nan_buf
= (bcm_iov_batch_buf_t
*)buf
;
4212 if (cfg
->nan_init_state
) {
4213 WL_ERR(("nan initialized/nmi exists\n"));
4216 nan_buf
->version
= htol16(WL_NAN_IOV_BATCH_VERSION
);
4218 nan_buf_size
-= OFFSETOF(bcm_iov_batch_buf_t
, cmds
[0]);
4219 ret
= wl_cfgnan_init_handler(&nan_buf
->cmds
[0], &nan_buf_size
, true);
4220 if (unlikely(ret
)) {
4221 WL_ERR(("init handler sub_cmd set failed\n"));
4225 nan_buf
->is_set
= true;
4227 memset(resp_buf
, 0, sizeof(resp_buf
));
4228 ret
= wl_cfgnan_execute_ioctl(bcmcfg_to_prmry_ndev(cfg
), cfg
,
4229 nan_buf
, nan_buf_size
, &status
,
4230 (void*)resp_buf
, NAN_IOCTL_BUF_SIZE
);
4231 if (unlikely(ret
) || unlikely(status
)) {
4232 WL_ERR(("nan init handler failed ret %d status %d\n",
4237 #ifdef WL_NAN_DISC_CACHE
4238 /* malloc for disc result */
4239 cfg
->nan_disc_cache
= MALLOCZ(cfg
->osh
,
4240 NAN_MAX_CACHE_DISC_RESULT
* sizeof(nan_disc_result_cache
));
4241 if (!cfg
->nan_disc_cache
) {
4242 WL_ERR(("%s: memory allocation failed\n", __func__
));
4246 #endif /* WL_NAN_DISC_CACHE */
4247 cfg
->nan_init_state
= true;
4255 wl_cfgnan_deinit(struct bcm_cfg80211
*cfg
, uint8 busstate
)
4258 uint16 nan_buf_size
= NAN_IOCTL_BUF_SIZE
;
4260 uint8 resp_buf
[NAN_IOCTL_BUF_SIZE
];
4261 uint8 buf
[NAN_IOCTL_BUF_SIZE
];
4262 bcm_iov_batch_buf_t
*nan_buf
= (bcm_iov_batch_buf_t
*)buf
;
4268 if (!cfg
->nan_init_state
) {
4269 WL_ERR(("nan is not initialized/nmi doesnt exists\n"));
4274 if (busstate
!= DHD_BUS_DOWN
) {
4275 nan_buf
->version
= htol16(WL_NAN_IOV_BATCH_VERSION
);
4277 nan_buf_size
-= OFFSETOF(bcm_iov_batch_buf_t
, cmds
[0]);
4279 WL_DBG(("nan deinit\n"));
4280 ret
= wl_cfgnan_init_handler(&nan_buf
->cmds
[0], &nan_buf_size
, false);
4281 if (unlikely(ret
)) {
4282 WL_ERR(("deinit handler sub_cmd set failed\n"));
4285 nan_buf
->is_set
= true;
4286 memset(resp_buf
, 0, sizeof(resp_buf
));
4287 ret
= wl_cfgnan_execute_ioctl(cfg
->wdev
->netdev
, cfg
,
4288 nan_buf
, nan_buf_size
, &status
,
4289 (void*)resp_buf
, NAN_IOCTL_BUF_SIZE
);
4290 if (unlikely(ret
) || unlikely(status
)) {
4291 WL_ERR(("nan init handler failed ret %d status %d\n",
4297 for (i
= 0; i
< NAN_MAX_NDI
; i
++) {
4298 /* clean NDI data */
4299 cfg
->nancfg
.ndi
[i
].in_use
= false;
4300 cfg
->nancfg
.ndi
[i
].created
= false;
4301 memset(&cfg
->nancfg
.ndi
[i
].ifname
, 0x0, IFNAMSIZ
);
4304 cfg
->nan_dp_mask
= 0;
4305 cfg
->nan_init_state
= false;
4306 #ifdef WL_NAN_DISC_CACHE
4307 if (cfg
->nan_disc_cache
) {
4308 for (i
= 0; i
< NAN_MAX_CACHE_DISC_RESULT
; i
++) {
4309 if (cfg
->nan_disc_cache
[i
].tx_match_filter
.data
) {
4310 MFREE(cfg
->osh
, cfg
->nan_disc_cache
[i
].tx_match_filter
.data
,
4311 cfg
->nan_disc_cache
[i
].tx_match_filter
.dlen
);
4313 if (cfg
->nan_disc_cache
[i
].svc_info
.data
) {
4314 MFREE(cfg
->osh
, cfg
->nan_disc_cache
[i
].svc_info
.data
,
4315 cfg
->nan_disc_cache
[i
].svc_info
.dlen
);
4318 MFREE(cfg
->osh
, cfg
->nan_disc_cache
,
4319 NAN_MAX_CACHE_DISC_RESULT
* sizeof(nan_disc_result_cache
));
4320 cfg
->nan_disc_cache
= NULL
;
4322 cfg
->nan_disc_count
= 0;
4323 memset(cfg
->svc_info
, 0, NAN_MAX_SVC_INST
* sizeof(nan_svc_info_t
));
4324 memset(cfg
->nan_ranging_info
, 0, NAN_MAX_RANGING_INST
* sizeof(nan_ranging_inst_t
));
4325 #endif /* WL_NAN_DISC_CACHE */
4327 if (!cfg
->nancfg
.mac_rand
) {
4328 wl_release_vif_macaddr(cfg
, cfg
->nan_nmi_mac
, WL_IF_TYPE_NAN_NMI
);
4336 wl_cfgnan_get_ndi_macaddr(struct bcm_cfg80211
*cfg
, u8
* mac_addr
)
4340 bool rand_mac
= cfg
->nancfg
.mac_rand
;
4344 /* ensure nmi != ndi */
4346 RANDOM_BYTES(mac_addr
, ETHER_ADDR_LEN
);
4347 /* restore mcast and local admin bits to 0 and 1 */
4348 ETHER_SET_UNICAST(mac_addr
);
4349 ETHER_SET_LOCALADDR(mac_addr
);
4351 if (i
== NAN_RAND_MAC_RETRIES
) {
4354 } while (eacmp(cfg
->nan_nmi_mac
, mac_addr
) == 0);
4356 if (i
== NAN_RAND_MAC_RETRIES
) {
4357 if (eacmp(cfg
->nan_nmi_mac
, mac_addr
) == 0) {
4358 WL_ERR(("\nCouldn't generate rand NDI which != NMI\n"));
4359 ret
= BCME_NORESOURCE
;
4364 if (wl_get_vif_macaddr(cfg
, WL_IF_TYPE_NAN
,
4365 mac_addr
) != BCME_OK
) {
4376 wl_cfgnan_data_path_iface_create_delete_handler(struct net_device
*ndev
,
4377 struct bcm_cfg80211
*cfg
, char *ifname
, uint16 type
, uint8 busstate
)
4379 u8 mac_addr
[ETH_ALEN
];
4383 if (busstate
!= DHD_BUS_DOWN
) {
4384 if (type
== NAN_WIFI_SUBCMD_DATA_PATH_IFACE_CREATE
) {
4385 ret
= wl_cfgnan_get_ndi_macaddr(cfg
, mac_addr
);
4386 if (ret
!= BCME_OK
) {
4387 WL_ERR(("Couldn't get mac addr for NDI ret %d\n", ret
));
4390 if (wl_cfg80211_add_if(cfg
, ndev
, WL_IF_TYPE_NAN
,
4391 ifname
, mac_addr
) == NULL
) {
4395 } else if (type
== NAN_WIFI_SUBCMD_DATA_PATH_IFACE_DELETE
) {
4396 ret
= wl_cfg80211_del_if(cfg
, ndev
, NULL
, ifname
);
4400 WL_ERR(("Bus is already down, no dev found to remove, ret = %d\n", ret
));
4408 wl_cfgnan_data_path_request_handler(struct net_device
*ndev
,
4409 struct bcm_cfg80211
*cfg
, nan_datapath_cmd_data_t
*cmd_data
,
4410 uint8
*ndp_instance_id
)
4413 bcm_iov_batch_buf_t
*nan_buf
= NULL
;
4414 wl_nan_dp_req_t
*datareq
= NULL
;
4415 bcm_iov_batch_subcmd_t
*sub_cmd
= NULL
;
4416 uint16 buflen_avail
;
4418 struct wireless_dev
*wdev
;
4420 uint16 nan_buf_size
;
4421 uint8
*resp_buf
= NULL
;
4422 /* Considering fixed params */
4423 uint16 data_size
= WL_NAN_OBUF_DATA_OFFSET
+
4424 OFFSETOF(wl_nan_dp_req_t
, tlv_params
);
4425 data_size
= ALIGN_SIZE(data_size
, 4);
4427 ret
= wl_cfgnan_aligned_data_size_of_opt_dp_params(&data_size
, cmd_data
);
4428 if (unlikely(ret
)) {
4429 WL_ERR(("Failed to get alligned size of optional params\n"));
4433 nan_buf_size
= data_size
;
4437 nan_buf
= MALLOCZ(cfg
->osh
, data_size
);
4439 WL_ERR(("%s: memory allocation failed\n", __func__
));
4444 resp_buf
= MALLOCZ(cfg
->osh
, data_size
+ NAN_IOVAR_NAME_SIZE
);
4446 WL_ERR(("%s: memory allocation failed\n", __func__
));
4451 ret
= wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg
),
4452 cfg
, &cmd_data
->avail_params
, WL_AVAIL_LOCAL
);
4453 if (unlikely(ret
)) {
4454 WL_ERR(("Failed to set avail value with type local\n"));
4458 ret
= wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg
),
4459 cfg
, &cmd_data
->avail_params
, WL_AVAIL_NDC
);
4460 if (unlikely(ret
)) {
4461 WL_ERR(("Failed to set avail value with type ndc\n"));
4465 nan_buf
->version
= htol16(WL_NAN_IOV_BATCH_VERSION
);
4467 nan_buf_size
-= OFFSETOF(bcm_iov_batch_buf_t
, cmds
[0]);
4469 sub_cmd
= (bcm_iov_batch_subcmd_t
*)(&nan_buf
->cmds
[0]);
4470 datareq
= (wl_nan_dp_req_t
*)(sub_cmd
->data
);
4472 /* setting default data path type to unicast */
4473 datareq
->type
= WL_NAN_DP_TYPE_UNICAST
;
4475 if (cmd_data
->pub_id
) {
4476 datareq
->pub_id
= cmd_data
->pub_id
;
4479 if (!ETHER_ISNULLADDR(&cmd_data
->mac_addr
.octet
)) {
4480 memcpy(&datareq
->peer_mac
, &cmd_data
->mac_addr
, ETHER_ADDR_LEN
);
4482 WL_ERR(("Invalid ether addr provided\n"));
4487 /* Retrieve mac from given iface name */
4488 wdev
= wl_cfg80211_get_wdev_from_ifname(cfg
,
4489 (char *)cmd_data
->ndp_iface
);
4490 if (!wdev
|| ETHER_ISNULLADDR(wdev
->netdev
->dev_addr
)) {
4495 if (!ETHER_ISNULLADDR(wdev
->netdev
->dev_addr
)) {
4496 memcpy(&datareq
->ndi
, wdev
->netdev
->dev_addr
, ETHER_ADDR_LEN
);
4497 WL_TRACE(("%s: Retrieved ndi mac " MACDBG
"\n",
4498 __FUNCTION__
, MAC2STRDBG(datareq
->ndi
.octet
)));
4500 WL_ERR(("Invalid NDI addr retrieved\n"));
4505 datareq
->ndl_qos
.min_slots
= NAN_NDL_QOS_MIN_SLOT_NO_PREF
;
4506 datareq
->ndl_qos
.max_latency
= NAN_NDL_QOS_MAX_LAT_NO_PREF
;
4508 /* Fill the sub_command block */
4509 sub_cmd
->id
= htod16(WL_NAN_CMD_DATA_DATAREQ
);
4510 sub_cmd
->len
= sizeof(sub_cmd
->u
.options
) +
4511 OFFSETOF(wl_nan_dp_req_t
, tlv_params
);
4512 sub_cmd
->u
.options
= htol32(BCM_XTLV_OPTION_ALIGN32
);
4513 pxtlv
= (uint8
*)&datareq
->tlv_params
;
4515 nan_buf_size
-= (sub_cmd
->len
+
4516 OFFSETOF(bcm_iov_batch_subcmd_t
, u
.options
));
4517 buflen_avail
= nan_buf_size
;
4519 if (cmd_data
->svc_info
.data
&& cmd_data
->svc_info
.dlen
) {
4520 ret
= bcm_pack_xtlv_entry(&pxtlv
, &nan_buf_size
,
4521 WL_NAN_XTLV_SD_SVC_INFO
, cmd_data
->svc_info
.dlen
,
4522 cmd_data
->svc_info
.data
,
4523 BCM_XTLV_OPTION_ALIGN32
);
4524 if (ret
!= BCME_OK
) {
4525 WL_ERR(("unable to process svc_spec_info: %d\n", ret
));
4528 datareq
->flags
|= WL_NAN_DP_FLAG_SVC_INFO
;
4531 /* Security elements */
4533 if (cmd_data
->csid
) {
4534 WL_TRACE(("Cipher suite type is present, pack it\n"));
4535 ret
= bcm_pack_xtlv_entry(&pxtlv
, &nan_buf_size
,
4536 WL_NAN_XTLV_CFG_SEC_CSID
, sizeof(nan_sec_csid_e
),
4537 (uint8
*)&cmd_data
->csid
, BCM_XTLV_OPTION_ALIGN32
);
4538 if (unlikely(ret
)) {
4539 WL_ERR(("%s: fail to pack on csid\n", __FUNCTION__
));
4544 if (cmd_data
->ndp_cfg
.security_cfg
) {
4545 if ((cmd_data
->key_type
== NAN_SECURITY_KEY_INPUT_PMK
) ||
4546 (cmd_data
->key_type
== NAN_SECURITY_KEY_INPUT_PASSPHRASE
)) {
4547 if (cmd_data
->key
.data
&& cmd_data
->key
.dlen
) {
4548 WL_TRACE(("optional pmk present, pack it\n"));
4549 ret
= bcm_pack_xtlv_entry(&pxtlv
, &nan_buf_size
,
4550 WL_NAN_XTLV_CFG_SEC_PMK
, cmd_data
->key
.dlen
,
4551 cmd_data
->key
.data
, BCM_XTLV_OPTION_ALIGN32
);
4552 if (unlikely(ret
)) {
4553 WL_ERR(("%s: fail to pack on WL_NAN_XTLV_CFG_SEC_PMK\n",
4559 WL_ERR(("Invalid security key type\n"));
4564 if ((cmd_data
->svc_hash
.dlen
== WL_NAN_SVC_HASH_LEN
) &&
4565 (cmd_data
->svc_hash
.data
)) {
4566 WL_TRACE(("svc hash present, pack it\n"));
4567 ret
= bcm_pack_xtlv_entry(&pxtlv
, &nan_buf_size
,
4568 WL_NAN_XTLV_CFG_SVC_HASH
, WL_NAN_SVC_HASH_LEN
,
4569 cmd_data
->svc_hash
.data
, BCM_XTLV_OPTION_ALIGN32
);
4570 if (ret
!= BCME_OK
) {
4571 WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SVC_HASH\n",
4576 #ifdef WL_NAN_DISC_CACHE
4577 /* check in cache */
4578 nan_disc_result_cache
*cache
;
4579 cache
= wl_cfgnan_get_disc_result(cfg
,
4580 datareq
->pub_id
, &datareq
->peer_mac
);
4583 WL_ERR(("invalid svc hash data or length = %d\n",
4584 cmd_data
->svc_hash
.dlen
));
4587 WL_TRACE(("svc hash present, pack it\n"));
4588 ret
= bcm_pack_xtlv_entry(&pxtlv
, &nan_buf_size
,
4589 WL_NAN_XTLV_CFG_SVC_HASH
, WL_NAN_SVC_HASH_LEN
,
4590 cache
->svc_hash
, BCM_XTLV_OPTION_ALIGN32
);
4591 if (ret
!= BCME_OK
) {
4592 WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SVC_HASH\n",
4598 WL_ERR(("invalid svc hash data or length = %d\n",
4599 cmd_data
->svc_hash
.dlen
));
4601 #endif /* WL_NAN_DISC_CACHE */
4603 /* If the Data req is for secure data connection */
4604 datareq
->flags
|= WL_NAN_DP_FLAG_SECURITY
;
4607 sub_cmd
->len
+= (buflen_avail
- nan_buf_size
);
4608 nan_buf
->is_set
= false;
4611 ret
= wl_cfgnan_execute_ioctl(ndev
, cfg
, nan_buf
, data_size
,
4612 &(cmd_data
->status
), resp_buf
, data_size
+ NAN_IOVAR_NAME_SIZE
);
4613 if (unlikely(ret
) || unlikely(cmd_data
->status
)) {
4614 WL_ERR(("nan data path request handler failed, ret = %d status %d\n",
4615 ret
, cmd_data
->status
));
4619 /* check the response buff */
4620 if (ret
== BCME_OK
) {
4621 ret
= process_resp_buf(resp_buf
+ WL_NAN_OBUF_DATA_OFFSET
,
4622 ndp_instance_id
, WL_NAN_CMD_DATA_DATAREQ
);
4623 cmd_data
->ndp_instance_id
= *ndp_instance_id
;
4625 WL_INFORM_MEM(("[NAN] DP request successfull (ndp_id:%d)\n",
4626 cmd_data
->ndp_instance_id
));
4630 MFREE(cfg
->osh
, nan_buf
, data_size
);
4634 MFREE(cfg
->osh
, resp_buf
, data_size
+ NAN_IOVAR_NAME_SIZE
);
4643 wl_cfgnan_data_path_response_handler(struct net_device
*ndev
,
4644 struct bcm_cfg80211
*cfg
, nan_datapath_cmd_data_t
*cmd_data
)
4647 bcm_iov_batch_buf_t
*nan_buf
= NULL
;
4648 wl_nan_dp_resp_t
*dataresp
= NULL
;
4649 bcm_iov_batch_subcmd_t
*sub_cmd
= NULL
;
4650 uint16 buflen_avail
;
4652 struct wireless_dev
*wdev
;
4653 uint16 nan_buf_size
;
4654 uint8
*resp_buf
= NULL
;
4656 /* Considering fixed params */
4657 uint16 data_size
= WL_NAN_OBUF_DATA_OFFSET
+
4658 OFFSETOF(wl_nan_dp_resp_t
, tlv_params
);
4659 data_size
= ALIGN_SIZE(data_size
, 4);
4660 ret
= wl_cfgnan_aligned_data_size_of_opt_dp_params(&data_size
, cmd_data
);
4661 if (unlikely(ret
)) {
4662 WL_ERR(("Failed to get alligned size of optional params\n"));
4665 nan_buf_size
= data_size
;
4670 nan_buf
= MALLOCZ(cfg
->osh
, data_size
);
4672 WL_ERR(("%s: memory allocation failed\n", __func__
));
4677 resp_buf
= MALLOCZ(cfg
->osh
, data_size
+ NAN_IOVAR_NAME_SIZE
);
4679 WL_ERR(("%s: memory allocation failed\n", __func__
));
4684 ret
= wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg
),
4685 cfg
, &cmd_data
->avail_params
, WL_AVAIL_LOCAL
);
4686 if (unlikely(ret
)) {
4687 WL_ERR(("Failed to set avail value with type local\n"));
4691 ret
= wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg
),
4692 cfg
, &cmd_data
->avail_params
, WL_AVAIL_NDC
);
4693 if (unlikely(ret
)) {
4694 WL_ERR(("Failed to set avail value with type ndc\n"));
4698 nan_buf
->version
= htol16(WL_NAN_IOV_BATCH_VERSION
);
4700 nan_buf_size
-= OFFSETOF(bcm_iov_batch_buf_t
, cmds
[0]);
4702 sub_cmd
= (bcm_iov_batch_subcmd_t
*)(&nan_buf
->cmds
[0]);
4703 dataresp
= (wl_nan_dp_resp_t
*)(sub_cmd
->data
);
4705 /* Setting default data path type to unicast */
4706 dataresp
->type
= WL_NAN_DP_TYPE_UNICAST
;
4707 /* Changing status value as per fw convention */
4708 dataresp
->status
= cmd_data
->rsp_code
^= 1;
4709 dataresp
->reason_code
= 0;
4711 /* ndp instance id must be from 0 to 255 */
4712 if (cmd_data
->ndp_instance_id
<= NAN_ID_MIN
||
4713 cmd_data
->ndp_instance_id
> NAN_ID_MAX
) {
4714 WL_ERR(("Invalid ndp instance id\n"));
4718 dataresp
->ndp_id
= cmd_data
->ndp_instance_id
;
4720 /* Retrieved initiator ndi from NanDataPathRequestInd */
4721 if (!ETHER_ISNULLADDR(&cfg
->initiator_ndi
.octet
)) {
4722 memcpy(&dataresp
->mac_addr
, &cfg
->initiator_ndi
, ETHER_ADDR_LEN
);
4724 WL_ERR(("Invalid ether addr retrieved\n"));
4729 /* Retrieve mac from given iface name */
4730 wdev
= wl_cfg80211_get_wdev_from_ifname(cfg
,
4731 (char *)cmd_data
->ndp_iface
);
4732 if (!wdev
|| ETHER_ISNULLADDR(wdev
->netdev
->dev_addr
)) {
4737 if (!ETHER_ISNULLADDR(wdev
->netdev
->dev_addr
)) {
4738 memcpy(&dataresp
->ndi
, wdev
->netdev
->dev_addr
, ETHER_ADDR_LEN
);
4739 WL_TRACE(("%s: Retrieved ndi mac " MACDBG
"\n",
4740 __FUNCTION__
, MAC2STRDBG(dataresp
->ndi
.octet
)));
4742 WL_ERR(("Invalid NDI addr retrieved\n"));
4747 dataresp
->ndl_qos
.min_slots
= NAN_NDL_QOS_MIN_SLOT_NO_PREF
;
4748 dataresp
->ndl_qos
.max_latency
= NAN_NDL_QOS_MAX_LAT_NO_PREF
;
4750 /* Fill the sub_command block */
4751 sub_cmd
->id
= htod16(WL_NAN_CMD_DATA_DATARESP
);
4752 sub_cmd
->len
= sizeof(sub_cmd
->u
.options
) +
4753 OFFSETOF(wl_nan_dp_resp_t
, tlv_params
);
4754 sub_cmd
->u
.options
= htol32(BCM_XTLV_OPTION_ALIGN32
);
4755 pxtlv
= (uint8
*)&dataresp
->tlv_params
;
4757 nan_buf_size
-= (sub_cmd
->len
+
4758 OFFSETOF(bcm_iov_batch_subcmd_t
, u
.options
));
4759 buflen_avail
= nan_buf_size
;
4761 if (cmd_data
->svc_info
.data
&& cmd_data
->svc_info
.dlen
) {
4762 ret
= bcm_pack_xtlv_entry(&pxtlv
, &nan_buf_size
,
4763 WL_NAN_XTLV_SD_SVC_INFO
, cmd_data
->svc_info
.dlen
,
4764 cmd_data
->svc_info
.data
,
4765 BCM_XTLV_OPTION_ALIGN32
);
4766 if (ret
!= BCME_OK
) {
4767 WL_ERR(("unable to process svc_spec_info: %d\n", ret
));
4770 dataresp
->flags
|= WL_NAN_DP_FLAG_SVC_INFO
;
4773 /* Security elements */
4774 if (cmd_data
->csid
) {
4775 WL_TRACE(("Cipher suite type is present, pack it\n"));
4776 ret
= bcm_pack_xtlv_entry(&pxtlv
, &nan_buf_size
,
4777 WL_NAN_XTLV_CFG_SEC_CSID
, sizeof(nan_sec_csid_e
),
4778 (uint8
*)&cmd_data
->csid
, BCM_XTLV_OPTION_ALIGN32
);
4779 if (unlikely(ret
)) {
4780 WL_ERR(("%s: fail to pack csid\n", __FUNCTION__
));
4785 if (cmd_data
->ndp_cfg
.security_cfg
) {
4786 if ((cmd_data
->key_type
== NAN_SECURITY_KEY_INPUT_PMK
) ||
4787 (cmd_data
->key_type
== NAN_SECURITY_KEY_INPUT_PASSPHRASE
)) {
4788 if (cmd_data
->key
.data
&& cmd_data
->key
.dlen
) {
4789 WL_TRACE(("optional pmk present, pack it\n"));
4790 ret
= bcm_pack_xtlv_entry(&pxtlv
, &nan_buf_size
,
4791 WL_NAN_XTLV_CFG_SEC_PMK
, cmd_data
->key
.dlen
,
4792 cmd_data
->key
.data
, BCM_XTLV_OPTION_ALIGN32
);
4793 if (unlikely(ret
)) {
4794 WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SEC_PMK\n",
4800 WL_ERR(("Invalid security key type\n"));
4805 if ((cmd_data
->svc_hash
.dlen
== WL_NAN_SVC_HASH_LEN
) &&
4806 (cmd_data
->svc_hash
.data
)) {
4807 WL_TRACE(("svc hash present, pack it\n"));
4808 ret
= bcm_pack_xtlv_entry(&pxtlv
, &nan_buf_size
,
4809 WL_NAN_XTLV_CFG_SVC_HASH
, WL_NAN_SVC_HASH_LEN
,
4810 cmd_data
->svc_hash
.data
,
4811 BCM_XTLV_OPTION_ALIGN32
);
4812 if (ret
!= BCME_OK
) {
4813 WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SVC_HASH\n",
4818 /* If the Data resp is for secure data connection */
4819 dataresp
->flags
|= WL_NAN_DP_FLAG_SECURITY
;
4822 sub_cmd
->len
+= (buflen_avail
- nan_buf_size
);
4824 nan_buf
->is_set
= false;
4826 ret
= wl_cfgnan_execute_ioctl(ndev
, cfg
, nan_buf
, data_size
,
4827 &(cmd_data
->status
), resp_buf
, data_size
+ NAN_IOVAR_NAME_SIZE
);
4828 if (unlikely(ret
) || unlikely(cmd_data
->status
)) {
4829 WL_ERR(("nan data path response handler failed, error = %d, status %d\n",
4830 ret
, cmd_data
->status
));
4834 WL_INFORM_MEM(("[NAN] DP response successfull (ndp_id:%d)\n", dataresp
->ndp_id
));
4838 MFREE(cfg
->osh
, nan_buf
, data_size
);
4842 MFREE(cfg
->osh
, resp_buf
, data_size
+ NAN_IOVAR_NAME_SIZE
);
4850 int wl_cfgnan_data_path_end_handler(struct net_device
*ndev
,
4851 struct bcm_cfg80211
*cfg
, nan_datapath_cmd_data_t
*cmd_data
)
4853 bcm_iov_batch_buf_t
*nan_buf
= NULL
;
4854 wl_nan_dp_end_t
*dataend
= NULL
;
4855 bcm_iov_batch_subcmd_t
*sub_cmd
= NULL
;
4857 uint16 nan_buf_size
= NAN_IOCTL_BUF_SIZE
;
4858 uint8 resp_buf
[NAN_IOCTL_BUF_SIZE
];
4860 dhd_pub_t
*dhdp
= wl_cfg80211_get_dhdp(ndev
);
4866 WL_ERR(("bus is already down, hence blocking nan dp end\n"));
4871 if (!cfg
->nan_enable
) {
4872 WL_ERR(("nan is not enabled, nan dp end blocked\n"));
4877 /* ndp instance id must be from 0 to 255 */
4878 if (cmd_data
->ndp_instance_id
<= NAN_ID_MIN
||
4879 cmd_data
->ndp_instance_id
> NAN_ID_MAX
) {
4880 WL_ERR(("Invalid ndp instance id\n"));
4885 nan_buf
= MALLOCZ(dhdp
->osh
, nan_buf_size
);
4887 WL_ERR(("%s: memory allocation failed\n", __func__
));
4892 nan_buf
->version
= htol16(WL_NAN_IOV_BATCH_VERSION
);
4894 nan_buf_size
-= OFFSETOF(bcm_iov_batch_buf_t
, cmds
[0]);
4896 sub_cmd
= (bcm_iov_batch_subcmd_t
*)(&nan_buf
->cmds
[0]);
4897 dataend
= (wl_nan_dp_end_t
*)(sub_cmd
->data
);
4899 /* Fill sub_cmd block */
4900 sub_cmd
->id
= htod16(WL_NAN_CMD_DATA_DATAEND
);
4901 sub_cmd
->len
= sizeof(sub_cmd
->u
.options
) +
4903 sub_cmd
->u
.options
= htol32(BCM_XTLV_OPTION_ALIGN32
);
4905 dataend
->lndp_id
= cmd_data
->ndp_instance_id
;
4908 * Currently fw requires ndp_id and reason to end the data path
4909 * But wifi_nan.h takes ndp_instances_count and ndp_id.
4910 * Will keep reason = accept always.
4913 dataend
->status
= 1;
4915 nan_buf
->is_set
= true;
4918 nan_buf_size
-= (sub_cmd
->len
+
4919 OFFSETOF(bcm_iov_batch_subcmd_t
, u
.options
));
4920 memset(resp_buf
, 0, sizeof(resp_buf
));
4921 ret
= wl_cfgnan_execute_ioctl(ndev
, cfg
, nan_buf
, nan_buf_size
,
4922 &(cmd_data
->status
),
4923 (void*)resp_buf
, NAN_IOCTL_BUF_SIZE
);
4924 if (unlikely(ret
) || unlikely(cmd_data
->status
)) {
4925 WL_ERR(("nan data path end handler failed, error = %d status %d\n",
4926 ret
, cmd_data
->status
));
4929 WL_INFORM_MEM(("[NAN] DP end successfull (ndp_id:%d)\n",
4934 MFREE(dhdp
->osh
, nan_buf
, NAN_IOCTL_BUF_SIZE
);
4942 #ifdef WL_NAN_DISC_CACHE
4943 int wl_cfgnan_sec_info_handler(struct bcm_cfg80211
*cfg
,
4944 nan_datapath_sec_info_cmd_data_t
*cmd_data
, nan_hal_resp_t
*nan_req_resp
)
4946 s32 ret
= BCME_NOTFOUND
;
4947 /* check in cache */
4948 nan_disc_result_cache
*disc_cache
= NULL
;
4949 nan_svc_info_t
*svc_info
= NULL
;
4954 if (!cfg
->nan_init_state
) {
4955 WL_ERR(("nan is not initialized/nmi doesnt exists\n"));
4956 ret
= BCME_NOTENABLED
;
4960 /* datapath request context */
4961 if (cmd_data
->pub_id
&& !ETHER_ISNULLADDR(&cmd_data
->mac_addr
)) {
4962 disc_cache
= wl_cfgnan_get_disc_result(cfg
,
4963 cmd_data
->pub_id
, &cmd_data
->mac_addr
);
4965 WL_TRACE(("svc hash present, pack it\n"));
4966 memcpy(nan_req_resp
->svc_hash
, disc_cache
->svc_hash
, WL_NAN_SVC_HASH_LEN
);
4970 WL_ERR(("Missing mandatory info..pub id %d & pub_mac "MACDBG
"\n",
4971 cmd_data
->pub_id
, MAC2STRDBG(cmd_data
->mac_addr
.octet
)));
4975 /* datapath response context */
4976 if (cmd_data
->ndp_instance_id
) {
4977 svc_info
= wl_cfgnan_get_svc_inst(cfg
, 0, cmd_data
->ndp_instance_id
);
4978 /* Note: svc_info will not be present in OOB cases
4979 * In such case send NMI alone and let HAL handle if
4980 * svc_hash is mandatory
4983 WL_TRACE(("svc hash present, pack it\n"));
4984 memcpy(nan_req_resp
->svc_hash
, svc_info
->svc_hash
, WL_NAN_SVC_HASH_LEN
);
4986 WL_MEM(("svc_info not present..assuming OOB DP\n"));
4988 /* Always send NMI */
4989 memcpy(nan_req_resp
->pub_nmi
, cfg
->nan_nmi_mac
, ETHER_ADDR_LEN
);
4992 WL_ERR(("Invalid ndp id\n"));
5000 static s32
wl_nan_cache_to_event_data(nan_disc_result_cache
*cache
,
5001 nan_event_data_t
*nan_event_data
, osl_t
*osh
)
5006 nan_event_data
->pub_id
= cache
->pub_id
;
5007 nan_event_data
->sub_id
= cache
->sub_id
;
5008 nan_event_data
->publish_rssi
= cache
->publish_rssi
;
5009 nan_event_data
->peer_cipher_suite
= cache
->peer_cipher_suite
;
5010 memcpy(&nan_event_data
->remote_nmi
, &cache
->peer
, ETHER_ADDR_LEN
);
5012 if (cache
->svc_info
.dlen
&& cache
->svc_info
.data
) {
5013 nan_event_data
->svc_info
.dlen
= cache
->svc_info
.dlen
;
5014 nan_event_data
->svc_info
.data
=
5015 MALLOCZ(osh
, nan_event_data
->svc_info
.dlen
);
5016 if (!nan_event_data
->svc_info
.data
) {
5017 WL_ERR(("%s: memory allocation failed\n", __FUNCTION__
));
5018 nan_event_data
->svc_info
.dlen
= 0;
5022 memcpy(nan_event_data
->svc_info
.data
,
5023 cache
->svc_info
.data
, cache
->svc_info
.dlen
);
5025 if (cache
->tx_match_filter
.dlen
&& cache
->tx_match_filter
.data
) {
5026 nan_event_data
->tx_match_filter
.dlen
= cache
->tx_match_filter
.dlen
;
5027 nan_event_data
->tx_match_filter
.data
=
5028 MALLOCZ(osh
, nan_event_data
->tx_match_filter
.dlen
);
5029 if (!nan_event_data
->tx_match_filter
.data
) {
5030 WL_ERR(("%s: memory allocation failed\n", __FUNCTION__
));
5031 nan_event_data
->tx_match_filter
.dlen
= 0;
5035 memcpy(nan_event_data
->tx_match_filter
.data
,
5036 cache
->tx_match_filter
.data
, cache
->tx_match_filter
.dlen
);
5042 #endif /* WL_NAN_DISC_CACHE */
5044 wl_nan_dp_cmn_event_data(struct bcm_cfg80211
*cfg
, void *event_data
,
5045 uint16 data_len
, uint16
*tlvs_offset
,
5046 uint16
*nan_opts_len
, uint32 event_num
,
5047 int *hal_event_id
, nan_event_data_t
*nan_event_data
)
5051 wl_nan_ev_datapath_cmn_t
*ev_dp
;
5052 nan_svc_info_t
*svc_info
;
5053 bcm_xtlv_t
*xtlv
= (bcm_xtlv_t
*)event_data
;
5054 if (xtlv
->id
== WL_NAN_XTLV_DATA_DP_INFO
) {
5055 ev_dp
= (wl_nan_ev_datapath_cmn_t
*)xtlv
->data
;
5058 BCM_REFERENCE(svc_info
);
5060 /* Mapping to common struct between DHD and HAL */
5061 WL_TRACE(("Event type: %d\n", ev_dp
->type
));
5062 nan_event_data
->type
= ev_dp
->type
;
5063 WL_TRACE(("pub_id: %d\n", ev_dp
->pub_id
));
5064 nan_event_data
->pub_id
= ev_dp
->pub_id
;
5065 WL_TRACE(("security: %d\n", ev_dp
->security
));
5066 nan_event_data
->security
= ev_dp
->security
;
5068 /* Store initiator_ndi, required for data_path_response_request */
5069 memcpy(&cfg
->initiator_ndi
, &ev_dp
->initiator_ndi
,
5071 if (ev_dp
->type
== NAN_DP_SESSION_UNICAST
) {
5072 WL_INFORM_MEM(("NDP ID: %d\n", ev_dp
->ndp_id
));
5073 nan_event_data
->ndp_id
= ev_dp
->ndp_id
;
5074 WL_TRACE(("INITIATOR_NDI: " MACDBG
"\n",
5075 MAC2STRDBG(ev_dp
->initiator_ndi
.octet
)));
5076 WL_TRACE(("RESPONDOR_NDI: " MACDBG
"\n",
5077 MAC2STRDBG(ev_dp
->responder_ndi
.octet
)));
5078 WL_TRACE(("PEER NMI: " MACDBG
"\n",
5079 MAC2STRDBG(ev_dp
->peer_nmi
.octet
)));
5080 memcpy(&nan_event_data
->remote_nmi
, &ev_dp
->peer_nmi
,
5083 /* type is multicast */
5084 WL_INFORM_MEM(("NDP ID: %d\n", ev_dp
->mc_id
));
5085 nan_event_data
->ndp_id
= ev_dp
->mc_id
;
5086 WL_TRACE(("PEER NMI: " MACDBG
"\n",
5087 MAC2STRDBG(ev_dp
->peer_nmi
.octet
)));
5088 memcpy(&nan_event_data
->remote_nmi
, &ev_dp
->peer_nmi
,
5091 *tlvs_offset
= OFFSETOF(wl_nan_ev_datapath_cmn_t
, opt_tlvs
) +
5092 OFFSETOF(bcm_xtlv_t
, data
);
5093 *nan_opts_len
= data_len
- *tlvs_offset
;
5094 if (event_num
== WL_NAN_EVENT_PEER_DATAPATH_IND
) {
5095 *hal_event_id
= GOOGLE_NAN_EVENT_DATA_REQUEST
;
5096 #ifdef WL_NAN_DISC_CACHE
5097 svc_info
= wl_cfgnan_get_svc_inst(cfg
, nan_event_data
->pub_id
, 0);
5099 for (i
= 0; i
< NAN_MAX_SVC_INST
; i
++) {
5100 if (!svc_info
->ndp_id
[i
]) {
5101 WL_TRACE(("Found empty field\n"));
5105 if (i
== NAN_MAX_SVC_INST
) {
5106 WL_ERR(("%s:cannot accommadate ndp id\n", __FUNCTION__
));
5107 ret
= BCME_NORESOURCE
;
5110 svc_info
->ndp_id
[i
] = nan_event_data
->ndp_id
;
5113 #endif /* WL_NAN_DISC_CACHE */
5114 } else if (event_num
== WL_NAN_EVENT_DATAPATH_ESTB
) {
5115 *hal_event_id
= GOOGLE_NAN_EVENT_DATA_CONFIRMATION
;
5116 if (ev_dp
->role
== NAN_DP_ROLE_INITIATOR
) {
5117 memcpy(&nan_event_data
->responder_ndi
, &ev_dp
->responder_ndi
,
5119 WL_TRACE(("REMOTE_NDI: " MACDBG
"\n",
5120 MAC2STRDBG(ev_dp
->responder_ndi
.octet
)));
5121 WL_TRACE(("Initiator status %d\n", nan_event_data
->status
));
5123 memcpy(&nan_event_data
->responder_ndi
, &ev_dp
->initiator_ndi
,
5125 WL_TRACE(("REMOTE_NDI: " MACDBG
"\n",
5126 MAC2STRDBG(ev_dp
->initiator_ndi
.octet
)));
5128 if (ev_dp
->status
== NAN_NDP_STATUS_ACCEPT
) {
5129 nan_event_data
->status
= NAN_DP_REQUEST_ACCEPT
;
5130 } else if (ev_dp
->status
== NAN_NDP_STATUS_REJECT
) {
5131 nan_event_data
->status
= NAN_DP_REQUEST_REJECT
;
5133 WL_ERR(("%s:Status code = %x not expected\n",
5134 __FUNCTION__
, ev_dp
->status
));
5138 WL_TRACE(("Responder status %d\n", nan_event_data
->status
));
5139 wl_cfgnan_update_dp_mask(cfg
, true, nan_event_data
->ndp_id
);
5140 } else if (event_num
== WL_NAN_EVENT_DATAPATH_END
) {
5141 /* Mapping to common struct between DHD and HAL */
5142 *hal_event_id
= GOOGLE_NAN_EVENT_DATA_END
;
5143 wl_cfgnan_update_dp_mask(cfg
, false, nan_event_data
->ndp_id
);
5144 #ifdef WL_NAN_DISC_CACHE
5145 if (ev_dp
->role
!= NAN_DP_ROLE_INITIATOR
) {
5146 /* Only at Responder side,
5148 * clear the resp ndp id from the svc info cache
5150 svc_info
= wl_cfgnan_get_svc_inst(cfg
, 0, nan_event_data
->ndp_id
);
5152 for (i
= 0; i
< NAN_MAX_SVC_INST
; i
++) {
5153 if (svc_info
->ndp_id
[i
] == nan_event_data
->ndp_id
) {
5154 svc_info
->ndp_id
[i
] = 0;
5158 WL_DBG(("couldn't find entry for ndp id = %d\n",
5159 nan_event_data
->ndp_id
));
5162 #endif /* WL_NAN_DISC_CACHE */
5165 /* Follow though, not handling other IDs as of now */
5166 WL_DBG(("%s:ID = 0x%02x not supported\n", __FUNCTION__
, xtlv
->id
));
5174 wl_nan_print_status(wl_nan_conf_status_t
*nstatus
)
5176 printf("> enabled: %d\n", nstatus
->enabled
);
5177 printf("> Current NMI: " MACDBG
"\n", MAC2STRDBG(nstatus
->nmi
.octet
));
5178 printf("> Current cluster_id: " MACDBG
"\n", MAC2STRDBG(nstatus
->cid
.octet
));
5180 switch (nstatus
->role
) {
5181 case WL_NAN_ROLE_AUTO
:
5182 printf("> role: %s (%d)\n", "auto", nstatus
->role
);
5184 case WL_NAN_ROLE_NON_MASTER_NON_SYNC
:
5185 printf("> role: %s (%d)\n", "non-master-non-sync", nstatus
->role
);
5187 case WL_NAN_ROLE_NON_MASTER_SYNC
:
5188 printf("> role: %s (%d)\n", "non-master-sync", nstatus
->role
);
5190 case WL_NAN_ROLE_MASTER
:
5191 printf("> role: %s (%d)\n", "master", nstatus
->role
);
5193 case WL_NAN_ROLE_ANCHOR_MASTER
:
5194 printf("> role: %s (%d)\n", "anchor-master", nstatus
->role
);
5197 printf("> role: %s (%d)\n", "undefined", nstatus
->role
);
5201 printf("> social channels: %d, %d\n",
5202 nstatus
->social_chans
[0], nstatus
->social_chans
[1]);
5203 printf("> master_rank: " NMRSTR
"\n", NMR2STR(nstatus
->mr
));
5204 printf("> amr : " NMRSTR
"\n", NMR2STR(nstatus
->amr
));
5205 printf("> hop_count: %d\n", nstatus
->hop_count
);
5206 printf("> ambtt: %d\n", nstatus
->ambtt
);
5210 wl_cfgnan_notify_nan_status(struct bcm_cfg80211
*cfg
,
5211 bcm_struct_cfgdev
*cfgdev
, const wl_event_msg_t
*event
, void *event_data
)
5216 int hal_event_id
= 0;
5217 nan_event_data_t
*nan_event_data
= NULL
;
5218 nan_parse_event_ctx_t nan_event_ctx
;
5219 uint16 tlvs_offset
= 0;
5220 uint16 nan_opts_len
= 0;
5223 bcm_xtlv_opts_t xtlv_opt
= BCM_IOV_CMD_OPT_ALIGN32
;
5224 nan_svc_info_t
*svc
;
5226 UNUSED_PARAMETER(wl_nan_print_status
);
5230 if (!cfg
->nan_init_state
) {
5231 WL_ERR(("nan is not in initialized state, dropping nan related events\n"));
5235 if (!event
|| !event_data
) {
5236 WL_ERR(("event data is NULL\n"));
5241 event_type
= ntoh32(event
->event_type
);
5242 event_num
= ntoh32(event
->reason
);
5243 data_len
= ntoh32(event
->datalen
);
5244 nan_event_data
= MALLOCZ(cfg
->osh
, sizeof(*nan_event_data
));
5245 if (!nan_event_data
) {
5246 WL_ERR(("%s: memory allocation failed\n", __func__
));
5250 if (NAN_INVALID_EVENT(event_num
)) {
5251 WL_ERR(("unsupported event, num: %d, event type: %d\n", event_num
, event_type
));
5255 WL_DBG((">> Nan Event Received: %s (num=%d, len=%d)\n",
5256 nan_event_to_str(event_num
), event_num
, data_len
));
5259 prhex("nan_event_data:", event_data
, data_len
);
5260 #endif /* WL_NAN_DEBUG */
5262 nan_event_ctx
.cfg
= cfg
;
5263 nan_event_ctx
.nan_evt_data
= nan_event_data
;
5265 * send as preformatted hex string
5266 * EVENT_NAN <event_type> <tlv_hex_string>
5268 switch (event_num
) {
5269 case WL_NAN_EVENT_START
:
5270 case WL_NAN_EVENT_MERGE
:
5271 case WL_NAN_EVENT_ROLE
: {
5272 /* get nan status info as-is */
5273 bcm_xtlv_t
*xtlv
= (bcm_xtlv_t
*)event_data
;
5274 wl_nan_conf_status_t
*nstatus
= (wl_nan_conf_status_t
*)xtlv
->data
;
5275 WL_INFORM_MEM((">> Nan Mac Event Received: %s (num=%d, len=%d)\n",
5276 nan_event_to_str(event_num
), event_num
, data_len
));
5277 WL_INFORM_MEM(("Nan Device Role %s\n", nan_role_to_str(nstatus
->role
)));
5278 /* Mapping to common struct between DHD and HAL */
5279 nan_event_data
->enabled
= nstatus
->enabled
;
5280 memcpy(&nan_event_data
->local_nmi
, &nstatus
->nmi
,
5282 memcpy(&nan_event_data
->clus_id
, &nstatus
->cid
,
5284 nan_event_data
->nan_de_evt_type
= event_num
;
5286 wl_nan_print_status(nstatus
);
5287 #endif /* WL_NAN_DEBUG */
5288 if (event_num
== WL_NAN_EVENT_START
) {
5289 cfg
->nan_enable
= true;
5291 cfg
->nancfg
.nan_event_recvd
= true;
5293 wake_up(&cfg
->nancfg
.nan_event_wait
);
5295 hal_event_id
= GOOGLE_NAN_EVENT_DE_EVENT
;
5299 case WL_NAN_EVENT_STOP
: {
5300 WL_INFORM_MEM((">> Nan Mac Stop Event Received\n"));
5301 hal_event_id
= GOOGLE_NAN_EVENT_DISABLED
;
5303 cfg
->nancfg
.nan_event_recvd
= true;
5305 wake_up(&cfg
->nancfg
.nan_event_wait
);
5306 cfg
->nancfg
.inst_id_start
= 0;
5307 memset(cfg
->nancfg
.svc_inst_id_mask
, 0, sizeof(cfg
->nancfg
.svc_inst_id_mask
));
5308 memset(cfg
->svc_info
, 0, NAN_MAX_SVC_INST
* sizeof(nan_svc_info_t
));
5309 if (cfg
->nancfg
.disable_reason
== NAN_USER_INITIATED
) {
5310 /* do not event to host if command is from host */
5312 } else if (cfg
->nancfg
.disable_reason
== NAN_CONCURRENCY_CONFLICT
) {
5313 nan_event_data
->status
= NAN_STATUS_UNSUPPORTED_CONCURRENCY_NAN_DISABLED
;
5315 nan_event_data
->status
= NAN_STATUS_SUCCESS
;
5319 case WL_NAN_EVENT_TERMINATED
: {
5320 bcm_xtlv_t
*xtlv
= (bcm_xtlv_t
*)event_data
;
5321 wl_nan_ev_terminated_t
*pev
= (wl_nan_ev_terminated_t
*)xtlv
->data
;
5323 /* Mapping to common struct between DHD and HAL */
5324 WL_TRACE(("Instance ID: %d\n", pev
->instance_id
));
5325 nan_event_data
->local_inst_id
= pev
->instance_id
;
5326 WL_TRACE(("Service Type: %d\n", pev
->svctype
));
5328 #ifdef WL_NAN_DISC_CACHE
5329 if (pev
->svctype
== NAN_SC_SUBSCRIBE
) {
5330 wl_cfgnan_remove_disc_result(cfg
, pev
->instance_id
);
5332 #endif /* WL_NAN_DISC_CACHE */
5333 /* Mapping reason code of FW to status code of framework */
5334 if (pev
->reason
== NAN_TERM_REASON_TIMEOUT
||
5335 pev
->reason
== NAN_TERM_REASON_USER_REQ
||
5336 pev
->reason
== NAN_TERM_REASON_COUNT_REACHED
) {
5337 nan_event_data
->status
= NAN_STATUS_SUCCESS
;
5338 memcpy(nan_event_data
->nan_reason
, "NAN_STATUS_SUCCESS",
5339 strlen("NAN_STATUS_SUCCESS"));
5341 nan_event_data
->status
= NAN_STATUS_INTERNAL_FAILURE
;
5342 memcpy(nan_event_data
->nan_reason
, "NAN_STATUS_INTERNAL_FAILURE",
5343 strlen("NAN_STATUS_INTERNAL_FAILURE"));
5346 if (pev
->svctype
== NAN_SC_SUBSCRIBE
) {
5347 hal_event_id
= GOOGLE_NAN_EVENT_SUBSCRIBE_TERMINATED
;
5349 hal_event_id
= GOOGLE_NAN_EVENT_PUBLISH_TERMINATED
;
5351 #ifdef WL_NAN_DISC_CACHE
5352 /* terminate ranging sessions */
5353 wl_cfgnan_terminate_ranging_sessions(bcmcfg_to_prmry_ndev(cfg
),
5354 cfg
, pev
->instance_id
);
5355 #endif /* WL_NAN_DISC_CACHE */
5359 case WL_NAN_EVENT_RECEIVE
: {
5360 nan_opts_len
= data_len
;
5361 hal_event_id
= GOOGLE_NAN_EVENT_FOLLOWUP
;
5362 xtlv_opt
= BCM_IOV_CMD_OPT_ALIGN_NONE
;
5366 case WL_NAN_EVENT_TXS
: {
5367 bcm_xtlv_t
*xtlv
= (bcm_xtlv_t
*)event_data
;
5368 wl_nan_event_txs_t
*txs
= (wl_nan_event_txs_t
*)xtlv
->data
;
5369 wl_nan_event_sd_txs_t
*txs_sd
= NULL
;
5370 if (txs
->status
== WL_NAN_TXS_SUCCESS
) {
5371 WL_MEM(("TXS success for type %d token %d",
5372 txs
->type
, txs
->host_seq
));
5373 nan_event_data
->status
= NAN_STATUS_SUCCESS
;
5374 memcpy(nan_event_data
->nan_reason
, "NAN_STATUS_SUCCESS",
5375 strlen("NAN_STATUS_SUCCESS"));
5377 /* TODO : populate status based on reason codes
5378 For now adding it as no ACK, so that app/framework can retry
5380 WL_INFORM_MEM(("TXS failed for type %d status %d token %d",
5381 txs
->type
, txs
->status
, txs
->host_seq
));
5382 nan_event_data
->status
= NAN_STATUS_NO_OTA_ACK
;
5383 memcpy(nan_event_data
->nan_reason
, "NAN_STATUS_NO_OTA_ACK",
5384 strlen("NAN_STATUS_NO_OTA_ACK"));
5386 nan_event_data
->reason
= txs
->reason_code
;
5387 nan_event_data
->token
= txs
->host_seq
;
5388 if (txs
->type
== WL_NAN_FRM_TYPE_FOLLOWUP
) {
5389 hal_event_id
= GOOGLE_NAN_EVENT_TRANSMIT_FOLLOWUP_IND
;
5390 xtlv
= (bcm_xtlv_t
*)(txs
->opt_tlvs
);
5391 if (txs
->opt_tlvs_len
&& xtlv
->id
== WL_NAN_XTLV_SD_TXS
) {
5392 txs_sd
= (wl_nan_event_sd_txs_t
*)xtlv
->data
;
5393 nan_event_data
->local_inst_id
= txs_sd
->inst_id
;
5395 WL_ERR(("Invalid params in TX status for trasnmit followup"));
5399 } else { /* TODO: add for other frame types if required */
5406 case WL_NAN_EVENT_DISCOVERY_RESULT
: {
5407 nan_opts_len
= data_len
;
5408 hal_event_id
= GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH
;
5409 xtlv_opt
= BCM_IOV_CMD_OPT_ALIGN_NONE
;
5412 #ifdef WL_NAN_DISC_CACHE
5413 case WL_NAN_EVENT_RNG_RPT_IND
: {
5414 bcm_xtlv_t
*xtlv
= (bcm_xtlv_t
*)event_data
;
5415 wl_nan_ev_rng_rpt_ind_t
*range_res
= (wl_nan_ev_rng_rpt_ind_t
*)xtlv
->data
;
5416 nan_disc_result_cache
*cache
;
5417 nan_event_data
->ranging_result_present
= 1;
5418 nan_event_data
->range_measurement_cm
= range_res
->dist_mm
/10;
5419 memcpy(&nan_event_data
->remote_nmi
, &range_res
->peer_m_addr
, ETHER_ADDR_LEN
);
5420 nan_event_data
->ranging_ind
= range_res
->indication
;
5421 WL_TRACE(("ranging ind = %d\n", range_res
->indication
));
5422 /* check in cache */
5423 cache
= wl_cfgnan_get_disc_result(cfg
,
5424 0, &range_res
->peer_m_addr
);
5427 WL_ERR(("Disc Cache entry not present for peer: " MACDBG
"\n",
5428 MAC2STRDBG(range_res
->peer_m_addr
.octet
)));
5431 WL_TRACE(("Disc cache entry, populate it\n"));
5432 ret
= wl_nan_cache_to_event_data(cache
,
5433 nan_event_data
, cfg
->osh
);
5434 if (ret
!= BCME_OK
) {
5439 case WL_NAN_EVENT_RNG_REQ_IND
: {
5440 wl_nan_ev_rng_req_ind_t
*rng_ind
;
5441 bcm_xtlv_t
*xtlv
= (bcm_xtlv_t
*)event_data
;
5443 nan_opts_len
= data_len
;
5444 rng_ind
= (wl_nan_ev_rng_req_ind_t
*)xtlv
->data
;
5445 xtlv_opt
= BCM_IOV_CMD_OPT_ALIGN_NONE
;
5446 WL_TRACE(("Received WL_NAN_EVENT_RNG_REQ_IND range_id %d\n",
5448 ret
= wl_cfgnan_handle_ranging_ind(cfg
, rng_ind
);
5449 /* no need to event to HAL */
5453 case WL_NAN_EVENT_RNG_TERM_IND
: {
5454 bcm_xtlv_t
*xtlv
= (bcm_xtlv_t
*)event_data
;
5455 nan_ranging_inst_t
*rng_inst
;
5456 wl_nan_ev_rng_term_ind_t
*range_term
= (wl_nan_ev_rng_term_ind_t
*)xtlv
->data
;
5457 WL_TRACE(("Peer_NMI: " MACDBG
"\n",
5458 MAC2STRDBG(range_term
->peer_m_addr
.octet
)));
5459 WL_TRACE(("Reason code:%d\n", range_term
->reason_code
));
5460 WL_TRACE(("Received WL_NAN_EVENT_RNG_TERM_IND\n"));
5461 rng_inst
= wl_cfgnan_check_for_ranging(cfg
, &range_term
->peer_m_addr
);
5463 /* clear ranging instance */
5464 WL_TRACE(("reset the ranging instance"));
5465 memset(rng_inst
, 0, sizeof(*rng_inst
));
5469 #endif /* WL_NAN_DISC_CACHE */
5471 * Data path events data are received in common event struct,
5472 * Handling all the events as part of one case, hence fall through is intentional
5474 case WL_NAN_EVENT_PEER_DATAPATH_IND
:
5475 case WL_NAN_EVENT_DATAPATH_ESTB
:
5476 case WL_NAN_EVENT_DATAPATH_END
: {
5477 ret
= wl_nan_dp_cmn_event_data(cfg
, event_data
, data_len
,
5478 &tlvs_offset
, &nan_opts_len
,
5479 event_num
, &hal_event_id
, nan_event_data
);
5480 /* Avoiding optional param parsing for DP END Event */
5481 if (event_num
== WL_NAN_EVENT_DATAPATH_END
) {
5483 xtlv_opt
= BCM_IOV_CMD_OPT_ALIGN_NONE
;
5485 if (unlikely(ret
)) {
5486 WL_ERR(("nan dp common event data parse failed\n"));
5492 WL_ERR(("WARNING: unimplemented NAN APP EVENT = %d\n", event_num
));
5498 tlv_buf
= (uint8
*)event_data
+ tlvs_offset
;
5499 /* Extract event data tlvs and pass their resp to cb fn */
5500 ret
= bcm_unpack_xtlv_buf((void *)&nan_event_ctx
, (const uint8
*)tlv_buf
,
5501 nan_opts_len
, xtlv_opt
, wl_cfgnan_set_vars_cbfn
);
5502 if (ret
!= BCME_OK
) {
5503 WL_ERR(("Failed to unpack tlv data, ret=%d\n", ret
));
5507 #ifdef WL_NAN_DISC_CACHE
5508 if (hal_event_id
== GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH
) {
5509 WL_TRACE(("Cache disc res\n"));
5510 ret
= wl_cfgnan_cache_disc_result(cfg
, nan_event_data
);
5512 WL_ERR(("Failed to cache disc result ret %d\n", ret
));
5514 if (nan_event_data
->sde_control_flag
& NAN_SDE_CF_RANGING_REQUIRED
) {
5515 ret
= wl_cfgnan_check_disc_res_for_ranging(cfg
, nan_event_data
);
5516 if (ret
== BCME_OK
) {
5517 /* disc result to HAL will be given on ranging report */
5520 /* TODO: should we terminate service if ranging fails ? */
5521 WL_ERR(("Ranging failed or not required"));
5524 WL_TRACE(("Ranging not required\n"));
5527 * If tx match filter is present as part of active subscribe, keep same filter
5528 * values in discovery results also.
5530 if (nan_event_data
->sub_id
== nan_event_data
->requestor_id
) {
5531 svc
= wl_cfgnan_get_svc_inst(cfg
, nan_event_data
->sub_id
, 0);
5532 if (svc
&& svc
->tx_match_filter_len
) {
5533 nan_event_data
->tx_match_filter
.dlen
= svc
->tx_match_filter_len
;
5534 nan_event_data
->tx_match_filter
.data
=
5535 MALLOCZ(cfg
->osh
, svc
->tx_match_filter_len
);
5536 if (!nan_event_data
->tx_match_filter
.data
) {
5537 WL_ERR(("%s: tx_match_filter_data alloc failed\n",
5539 nan_event_data
->tx_match_filter
.dlen
= 0;
5543 memcpy(nan_event_data
->tx_match_filter
.data
,
5544 svc
->tx_match_filter
, svc
->tx_match_filter_len
);
5548 #endif /* WL_NAN_DISC_CACHE */
5550 /* Send up range result as subscribe match event */
5551 if (event_num
== WL_NAN_EVENT_RNG_RPT_IND
) {
5552 WL_TRACE(("Send up range result as subscribe match event\n"));
5553 hal_event_id
= GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH
;
5556 WL_TRACE(("Send up %s (%d) data to HAL, hal_event_id=%d\n",
5557 nan_event_to_str(event_num
), event_num
, hal_event_id
));
5558 #if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
5559 ret
= wl_cfgvendor_send_nan_event(cfg
->wdev
->wiphy
, bcmcfg_to_prmry_ndev(cfg
),
5560 hal_event_id
, nan_event_data
);
5561 if (ret
!= BCME_OK
) {
5562 WL_ERR(("Failed to send event to nan hal, %s (%d)\n",
5563 nan_event_to_str(event_num
), event_num
));
5565 #endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
5568 if (nan_event_data
) {
5569 if (nan_event_data
->tx_match_filter
.data
) {
5570 MFREE(cfg
->osh
, nan_event_data
->tx_match_filter
.data
,
5571 nan_event_data
->tx_match_filter
.dlen
);
5572 nan_event_data
->tx_match_filter
.data
= NULL
;
5574 if (nan_event_data
->rx_match_filter
.data
) {
5575 MFREE(cfg
->osh
, nan_event_data
->rx_match_filter
.data
,
5576 nan_event_data
->rx_match_filter
.dlen
);
5577 nan_event_data
->rx_match_filter
.data
= NULL
;
5579 if (nan_event_data
->svc_info
.data
) {
5580 MFREE(cfg
->osh
, nan_event_data
->svc_info
.data
,
5581 nan_event_data
->svc_info
.dlen
);
5582 nan_event_data
->svc_info
.data
= NULL
;
5584 if (nan_event_data
->sde_svc_info
.data
) {
5585 MFREE(cfg
->osh
, nan_event_data
->sde_svc_info
.data
,
5586 nan_event_data
->sde_svc_info
.dlen
);
5587 nan_event_data
->sde_svc_info
.data
= NULL
;
5589 MFREE(cfg
->osh
, nan_event_data
, sizeof(*nan_event_data
));
5597 #ifdef WL_NAN_DISC_CACHE
5599 wl_cfgnan_cache_disc_result(struct bcm_cfg80211
*cfg
, void * data
)
5601 nan_event_data_t
* disc
= (nan_event_data_t
*)data
;
5602 int i
, add_index
= 0;
5604 nan_disc_result_cache
*disc_res
= cfg
->nan_disc_cache
;
5606 if (!cfg
->nan_enable
) {
5607 WL_DBG(("nan not enabled"));
5608 return BCME_NOTENABLED
;
5610 if (cfg
->nan_disc_count
== NAN_MAX_CACHE_DISC_RESULT
) {
5611 WL_DBG(("cache full"));
5612 ret
= BCME_NORESOURCE
;
5616 for (i
= 0; i
< NAN_MAX_CACHE_DISC_RESULT
; i
++) {
5617 if (!disc_res
[i
].valid
) {
5621 if (!memcmp(&disc_res
[i
].peer
, &disc
->remote_nmi
, ETHER_ADDR_LEN
) &&
5622 !memcmp(disc_res
[i
].svc_hash
, disc
->svc_name
, WL_NAN_SVC_HASH_LEN
)) {
5623 WL_TRACE(("cache entry already present"));
5624 ret
= BCME_OK
; /* entry already present */
5628 WL_TRACE(("adding cache entry"));
5629 disc_res
[add_index
].valid
= 1;
5630 disc_res
[add_index
].pub_id
= disc
->pub_id
;
5631 disc_res
[add_index
].sub_id
= disc
->sub_id
;
5632 disc_res
[add_index
].publish_rssi
= disc
->publish_rssi
;
5633 disc_res
[add_index
].peer_cipher_suite
= disc
->peer_cipher_suite
;
5634 memcpy(&disc_res
[add_index
].peer
, &disc
->remote_nmi
, ETHER_ADDR_LEN
);
5635 memcpy(disc_res
[add_index
].svc_hash
, disc
->svc_name
, WL_NAN_SVC_HASH_LEN
);
5637 if (disc
->svc_info
.dlen
&& disc
->svc_info
.data
) {
5638 disc_res
[add_index
].svc_info
.dlen
= disc
->svc_info
.dlen
;
5639 disc_res
[add_index
].svc_info
.data
=
5640 MALLOCZ(cfg
->osh
, disc_res
[add_index
].svc_info
.dlen
);
5641 if (!disc_res
[add_index
].svc_info
.data
) {
5642 WL_ERR(("%s: memory allocation failed\n", __FUNCTION__
));
5643 disc_res
[add_index
].svc_info
.dlen
= 0;
5647 memcpy(disc_res
[add_index
].svc_info
.data
,
5648 disc
->svc_info
.data
, disc
->svc_info
.dlen
);
5650 if (disc
->tx_match_filter
.dlen
&& disc
->tx_match_filter
.data
) {
5651 disc_res
[add_index
].tx_match_filter
.dlen
= disc
->tx_match_filter
.dlen
;
5652 disc_res
[add_index
].tx_match_filter
.data
=
5653 MALLOCZ(cfg
->osh
, disc_res
[add_index
].tx_match_filter
.dlen
);
5654 if (!disc_res
[add_index
].tx_match_filter
.data
) {
5655 WL_ERR(("%s: memory allocation failed\n", __FUNCTION__
));
5656 disc_res
[add_index
].tx_match_filter
.dlen
= 0;
5660 memcpy(disc_res
[add_index
].tx_match_filter
.data
,
5661 disc
->tx_match_filter
.data
, disc
->tx_match_filter
.dlen
);
5663 cfg
->nan_disc_count
++;
5669 static int wl_cfgnan_remove_disc_result(struct bcm_cfg80211
*cfg
,
5673 int ret
= BCME_NOTFOUND
;
5674 nan_disc_result_cache
*disc_res
= cfg
->nan_disc_cache
;
5675 if (!cfg
->nan_enable
) {
5676 WL_DBG(("nan not enabled\n"));
5677 ret
= BCME_NOTENABLED
;
5680 for (i
= 0; i
< NAN_MAX_CACHE_DISC_RESULT
; i
++) {
5681 if (disc_res
[i
].sub_id
== local_subid
) {
5682 WL_TRACE(("make cache entry invalid\n"));
5683 disc_res
[i
].valid
= 0;
5684 cfg
->nan_disc_count
--;
5688 WL_DBG(("couldn't find entry\n"));
5693 static nan_disc_result_cache
*
5694 wl_cfgnan_get_disc_result(struct bcm_cfg80211
*cfg
, uint8 remote_pubid
,
5695 struct ether_addr
*peer
)
5698 nan_disc_result_cache
*disc_res
= cfg
->nan_disc_cache
;
5700 for (i
= 0; i
< NAN_MAX_CACHE_DISC_RESULT
; i
++) {
5701 if ((disc_res
[i
].pub_id
== remote_pubid
) &&
5702 !memcmp(&disc_res
[i
].peer
, peer
, ETHER_ADDR_LEN
)) {
5703 WL_TRACE(("Found entry"));
5704 return &disc_res
[i
];
5708 for (i
= 0; i
< NAN_MAX_CACHE_DISC_RESULT
; i
++) {
5709 if (!memcmp(&disc_res
[i
].peer
, peer
, ETHER_ADDR_LEN
)) {
5710 WL_TRACE(("Found entry"));
5711 return &disc_res
[i
];
5717 #endif /* WL_NAN_DISC_CACHE */
5720 wl_cfgnan_update_dp_mask(struct bcm_cfg80211
*cfg
, bool enable
, u8 nan_dp_id
)
5722 #ifdef ARP_OFFLOAD_SUPPORT
5723 dhd_pub_t
*dhd
= (struct dhd_pub
*)(cfg
->pub
);
5724 #endif /* ARP_OFFLOAD_SUPPORT */
5725 /* As of now, we don't see a need to know which ndp is active.
5726 * so just keep tracking of ndp via count. If we need to know
5727 * the status of each ndp based on ndp id, we need to change
5728 * this implementation to use a bit mask.
5731 WL_ERR(("dhd pub null!\n"));
5736 /* On first NAN DP indication, disable ARP. */
5737 #ifdef ARP_OFFLOAD_SUPPORT
5738 if (!cfg
->nan_dp_mask
) {
5739 dhd_arp_offload_set(dhd
, 0);
5740 dhd_arp_offload_enable(dhd
, false);
5742 #endif /* ARP_OFFLOAD_SUPPORT */
5743 cfg
->nan_dp_mask
|= (0x1 << nan_dp_id
);
5745 cfg
->nan_dp_mask
&= ~(0x1 << nan_dp_id
);
5746 #ifdef ARP_OFFLOAD_SUPPORT
5747 if (!cfg
->nan_dp_mask
) {
5748 /* If NAN DP count becomes zero and if there
5749 * are no conflicts, enable back ARP offload.
5750 * As of now, the conflicting interfaces are AP
5751 * and P2P. But NAN + P2P/AP concurrency is not
5754 dhd_arp_offload_set(dhd
, dhd_arp_mode
);
5755 dhd_arp_offload_enable(dhd
, true);
5757 #endif /* ARP_OFFLOAD_SUPPORT */
5759 WL_INFORM_MEM(("NAN_DP_MASK:0x%x\n", cfg
->nan_dp_mask
));
5763 wl_cfgnan_is_dp_active(struct net_device
*ndev
)
5765 struct bcm_cfg80211
*cfg
;
5768 if (!ndev
|| !ndev
->ieee80211_ptr
) {
5769 WL_ERR(("ndev/wdev null\n"));
5773 cfg
= wiphy_priv(ndev
->ieee80211_ptr
->wiphy
);
5774 nan_dp
= cfg
->nan_dp_mask
? true : false;
5776 WL_DBG(("NAN DP status:%d\n", nan_dp
));
5781 wl_cfgnan_get_ndi_idx(struct bcm_cfg80211
*cfg
)
5784 for (i
= 0; i
< NAN_MAX_NDI
; i
++) {
5785 if (!cfg
->nancfg
.ndi
[i
].in_use
) {
5786 /* Free interface, use it */
5790 /* Don't have a free interface */
5795 wl_cfgnan_add_ndi_data(struct bcm_cfg80211
*cfg
, s32 idx
, char *name
)
5798 if (!name
|| (idx
< 0) || (idx
>= NAN_MAX_NDI
)) {
5802 /* Ensure ifname string size <= IFNAMSIZ including null termination */
5803 len
= MIN(strlen(name
), (IFNAMSIZ
- 1));
5804 strncpy(cfg
->nancfg
.ndi
[idx
].ifname
, name
, len
);
5805 cfg
->nancfg
.ndi
[idx
].ifname
[len
] = '\0';
5806 cfg
->nancfg
.ndi
[idx
].in_use
= true;
5807 cfg
->nancfg
.ndi
[idx
].created
= false;
5809 /* Don't have a free interface */
5814 wl_cfgnan_del_ndi_data(struct bcm_cfg80211
*cfg
, char *name
)
5822 len
= MIN(strlen(name
), IFNAMSIZ
);
5823 for (i
= 0; i
< NAN_MAX_NDI
; i
++) {
5824 if (strncmp(cfg
->nancfg
.ndi
[i
].ifname
, name
, len
) == 0) {
5825 memset(&cfg
->nancfg
.ndi
[i
].ifname
, 0x0, IFNAMSIZ
);
5826 cfg
->nancfg
.ndi
[i
].in_use
= false;
5827 cfg
->nancfg
.ndi
[i
].created
= false;
5834 struct wl_ndi_data
*
5835 wl_cfgnan_get_ndi_data(struct bcm_cfg80211
*cfg
, char *name
)
5843 len
= MIN(strlen(name
), IFNAMSIZ
);
5844 for (i
= 0; i
< NAN_MAX_NDI
; i
++) {
5845 if (strncmp(cfg
->nancfg
.ndi
[i
].ifname
, name
, len
) == 0) {
5846 return &cfg
->nancfg
.ndi
[i
];