wireless: fix all kind of warnings
[GitHub/exynos8895/android_kernel_samsung_universal8895.git] / drivers / net / wireless / bcmdhd4361 / wl_cfgnan.c
1 /*
2 * Neighbor Awareness Networking
3 *
4 * Copyright (C) 1999-2019, Broadcom.
5 *
6 * Unless you and Broadcom execute a separate written software license
7 * agreement governing use of this software, this software is licensed to you
8 * under the terms of the GNU General Public License version 2 (the "GPL"),
9 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10 * following added to such license:
11 *
12 * As a special exception, the copyright holders of this software give you
13 * permission to link this software with independent modules, and to copy and
14 * distribute the resulting executable under terms of your choice, provided that
15 * you also meet, for each linked independent module, the terms and conditions of
16 * the license of that module. An independent module is a module which is not
17 * derived from this software. The special exception does not apply to any
18 * modifications of the software.
19 *
20 * Notwithstanding the above, under no circumstances may you combine this
21 * software in any way with any other Broadcom software provided under a license
22 * other than the GPL, without Broadcom's express prior written consent.
23 *
24 * <<Broadcom-WL-IPTag/Open:>>
25 *
26 * $Id: wl_cfgnan.c 759402 2018-04-25 10:01:49Z $
27 */
28
29 #ifdef WL_NAN
30 #include <bcmutils.h>
31 #include <bcmendian.h>
32 #include <bcmwifi_channels.h>
33 #include <nan.h>
34 #include <bcmiov.h>
35
36 #include <wl_cfg80211.h>
37 #include <wl_android.h>
38 #include <wl_cfgnan.h>
39
40 #include <dngl_stats.h>
41 #include <dhd.h>
42 #include <wl_cfgvendor.h>
43 #include <bcmbloom.h>
44 #include <wl_cfgp2p.h>
45
46 #define NAN_RANGE_REQ_CMD 0
47 #define NAN_RANGE_REQ_EVNT 1
48 #define NAN_RAND_MAC_RETRIES 10
49 #define NAN_SCAN_DWELL_TIME_DELTA_MS 10
50
51 #ifdef WL_NAN_DISC_CACHE
52 static int wl_cfgnan_cache_disc_result(struct bcm_cfg80211 *cfg, void * data);
53 static int wl_cfgnan_remove_disc_result(struct bcm_cfg80211 * cfg, uint8 local_subid);
54 static nan_disc_result_cache * wl_cfgnan_get_disc_result(struct bcm_cfg80211 *cfg,
55 uint8 remote_pubid, struct ether_addr *peer);
56 #endif /* WL_NAN_DISC_CACHE */
57 static void wl_cfgnan_update_dp_mask(struct bcm_cfg80211 *cfg, bool enable, u8 nan_dp_id);
58
59 static int wl_cfgnan_set_if_addr(struct bcm_cfg80211 *cfg);
60
/* Return the symbolic name of a WL_NAN_ROLE_* value for debug logging.
 * C2S presumably expands to "case X: return #X;" — confirm against the
 * macro definition in the headers. Unknown roles map to a fixed
 * placeholder string rather than NULL, so callers may print the result
 * unconditionally.
 */
static const char *nan_role_to_str(u8 role)
{
	switch (role) {
	C2S(WL_NAN_ROLE_AUTO)
	C2S(WL_NAN_ROLE_NON_MASTER_NON_SYNC)
	C2S(WL_NAN_ROLE_NON_MASTER_SYNC)
	C2S(WL_NAN_ROLE_MASTER)
	C2S(WL_NAN_ROLE_ANCHOR_MASTER)
	default:
		return "WL_NAN_ROLE_UNKNOWN";
	}
}
73
/* Return the symbolic name of a WL_NAN_EVENT_* firmware event code for
 * debug logging. C2S presumably expands to "case X: return #X;" —
 * confirm against the macro definition in the headers. Unknown event
 * codes map to a fixed placeholder string so the result is always a
 * valid C string.
 */
static const char *nan_event_to_str(u16 cmd)
{
	switch (cmd) {
	C2S(WL_NAN_EVENT_START)
	C2S(WL_NAN_EVENT_DISCOVERY_RESULT)
	C2S(WL_NAN_EVENT_TERMINATED)
	C2S(WL_NAN_EVENT_RECEIVE)
	C2S(WL_NAN_EVENT_MERGE)
	C2S(WL_NAN_EVENT_STOP)
	C2S(WL_NAN_EVENT_PEER_DATAPATH_IND)
	C2S(WL_NAN_EVENT_DATAPATH_ESTB)
	C2S(WL_NAN_EVENT_SDF_RX)
	C2S(WL_NAN_EVENT_DATAPATH_END)
	C2S(WL_NAN_EVENT_RNG_REQ_IND)
	C2S(WL_NAN_EVENT_RNG_RPT_IND)
	C2S(WL_NAN_EVENT_RNG_TERM_IND)
	C2S(WL_NAN_EVENT_TXS)
	C2S(WL_NAN_EVENT_INVALID)

	default:
		return "WL_NAN_EVENT_UNKNOWN";
	}
}
97
98 static int wl_cfgnan_execute_ioctl(struct net_device *ndev,
99 struct bcm_cfg80211 *cfg, bcm_iov_batch_buf_t *nan_buf,
100 uint16 nan_buf_size, uint32 *status, uint8 *resp_buf,
101 uint16 resp_buf_len);
102 #ifdef WL_NAN_DISC_CACHE
103 /* ranging quest and response iovar handler */
104 static int wl_cfgnan_trigger_ranging(struct net_device *ndev,
105 struct bcm_cfg80211 *cfg, void *event_data, nan_svc_info_t *svc, uint8 range_req);
106 #endif /* WL_NAN_DISC_CACHE */
107 static s32
108 wl_cfgnan_send_stop_event(nan_event_data_t *nan_event_data, struct bcm_cfg80211 *cfg);
109 int
110 wl_cfgnan_generate_inst_id(struct bcm_cfg80211 *cfg, uint8 *p_inst_id)
111 {
112 s32 ret = BCME_OK;
113 uint8 i = 0;
114 if (p_inst_id == NULL) {
115 WL_ERR(("Invalid arguments\n"));
116 ret = -EINVAL;
117 goto exit;
118 }
119
120 if (cfg->nancfg.inst_id_start == NAN_ID_MAX) {
121 WL_ERR(("Consumed all IDs, resetting the counter\n"));
122 cfg->nancfg.inst_id_start = 0;
123 }
124
125 for (i = cfg->nancfg.inst_id_start; i < NAN_ID_MAX; i++) {
126 if (isclr(cfg->nancfg.svc_inst_id_mask, i)) {
127 setbit(cfg->nancfg.svc_inst_id_mask, i);
128 *p_inst_id = i + 1;
129 cfg->nancfg.inst_id_start = *p_inst_id;
130 WL_DBG(("Instance ID=%d\n", *p_inst_id));
131 goto exit;
132 }
133 }
134 WL_ERR(("Allocated maximum IDs\n"));
135 ret = BCME_NORESOURCE;
136 exit:
137 return ret;
138 }
139
140 int
141 wl_cfgnan_remove_inst_id(struct bcm_cfg80211 *cfg, uint8 inst_id)
142 {
143 s32 ret = BCME_OK;
144 WL_DBG(("%s: Removing svc instance id %d\n", __FUNCTION__, inst_id));
145 clrbit(cfg->nancfg.svc_inst_id_mask, inst_id-1);
146 return ret;
147 }
148 s32 wl_cfgnan_parse_sdea_data(osl_t *osh, const uint8 *p_attr,
149 uint16 len, nan_event_data_t *tlv_data)
150 {
151 const wifi_nan_svc_desc_ext_attr_t *nan_svc_desc_ext_attr = NULL;
152 uint8 offset;
153 s32 ret = BCME_OK;
154
155 /* service descriptor ext attributes */
156 nan_svc_desc_ext_attr = (const wifi_nan_svc_desc_ext_attr_t *)p_attr;
157
158 /* attribute ID */
159 WL_TRACE(("> attr id: 0x%02x\n", nan_svc_desc_ext_attr->id));
160
161 /* attribute length */
162 WL_TRACE(("> attr len: 0x%x\n", nan_svc_desc_ext_attr->len));
163
164 tlv_data->sde_control_flag = nan_svc_desc_ext_attr->control;
165 offset = sizeof(*nan_svc_desc_ext_attr);
166 if (offset > len) {
167 WL_ERR(("Invalid event buffer len\n"));
168 ret = BCME_BUFTOOSHORT;
169 goto fail;
170 }
171 p_attr += offset;
172 len -= offset;
173
174 if (tlv_data->sde_control_flag & NAN_SC_RANGE_LIMITED) {
175 WL_TRACE(("> svc_control: range limited present\n"));
176 }
177 if (tlv_data->sde_control_flag & NAN_SDE_CF_SVC_UPD_IND_PRESENT) {
178 WL_TRACE(("> svc_control: sdea svc specific info present\n"));
179 tlv_data->sde_svc_info.dlen = (p_attr[1] | (p_attr[2] << 8));
180 WL_TRACE(("> sdea svc info len: 0x%02x\n", tlv_data->sde_svc_info.dlen));
181 if (!tlv_data->sde_svc_info.dlen ||
182 tlv_data->sde_svc_info.dlen > NAN_MAX_SERVICE_SPECIFIC_INFO_LEN) {
183 /* must be able to handle null msg which is not error */
184 tlv_data->sde_svc_info.dlen = 0;
185 WL_ERR(("data length is invalid\n"));
186 ret = BCME_BADLEN;
187 goto fail;
188 }
189
190 if (tlv_data->sde_svc_info.dlen > 0) {
191 tlv_data->sde_svc_info.data = MALLOCZ(osh, tlv_data->sde_svc_info.dlen);
192 if (!tlv_data->sde_svc_info.data) {
193 WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
194 tlv_data->sde_svc_info.dlen = 0;
195 ret = BCME_NOMEM;
196 goto fail;
197 }
198 /* advance read pointer, consider sizeof of Service Update Indicator */
199 offset = sizeof(tlv_data->sde_svc_info.dlen) - 1;
200 if (offset > len) {
201 WL_ERR(("Invalid event buffer len\n"));
202 ret = BCME_BUFTOOSHORT;
203 goto fail;
204 }
205 p_attr += offset;
206 len -= offset;
207 memcpy(tlv_data->sde_svc_info.data, p_attr, tlv_data->sde_svc_info.dlen);
208 } else {
209 /* must be able to handle null msg which is not error */
210 tlv_data->sde_svc_info.dlen = 0;
211 WL_DBG(("%s: sdea svc info length is zero, null info data\n",
212 __FUNCTION__));
213 }
214 }
215 return ret;
216 fail:
217 if (tlv_data->sde_svc_info.data) {
218 MFREE(osh, tlv_data->sde_svc_info.data,
219 tlv_data->sde_svc_info.dlen);
220 tlv_data->sde_svc_info.data = NULL;
221 }
222
223 WL_DBG(("Parse SDEA event data, status = %d\n", ret));
224 return ret;
225 }
226
227 /*
228 * This attribute contains some mandatory fields and some optional fields
229 * depending on the content of the service discovery request.
230 */
231 s32
232 wl_cfgnan_parse_sda_data(osl_t *osh, const uint8 *p_attr,
233 uint16 len, nan_event_data_t *tlv_data)
234 {
235 uint8 svc_control = 0, offset = 0;
236 s32 ret = BCME_OK;
237 const wifi_nan_svc_descriptor_attr_t *nan_svc_desc_attr = NULL;
238
239 /* service descriptor attributes */
240 nan_svc_desc_attr = (const wifi_nan_svc_descriptor_attr_t *)p_attr;
241 /* attribute ID */
242 WL_TRACE(("> attr id: 0x%02x\n", nan_svc_desc_attr->id));
243
244 /* attribute length */
245 WL_TRACE(("> attr len: 0x%x\n", nan_svc_desc_attr->len));
246
247 /* service ID */
248 memcpy(tlv_data->svc_name, nan_svc_desc_attr->svc_hash, NAN_SVC_HASH_LEN);
249 WL_TRACE(("> svc_hash_name: " MACDBG "\n", MAC2STRDBG(tlv_data->svc_name)));
250
251 /* local instance ID */
252 tlv_data->local_inst_id = nan_svc_desc_attr->instance_id;
253 WL_TRACE(("> local instance id: 0x%02x\n", tlv_data->local_inst_id));
254
255 /* requestor instance ID */
256 tlv_data->requestor_id = nan_svc_desc_attr->requestor_id;
257 WL_TRACE(("> requestor id: 0x%02x\n", tlv_data->requestor_id));
258
259 /* service control */
260 svc_control = nan_svc_desc_attr->svc_control;
261 if ((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_PUBLISH) {
262 WL_TRACE(("> Service control type: NAN_SC_PUBLISH\n"));
263 } else if ((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_SUBSCRIBE) {
264 WL_TRACE(("> Service control type: NAN_SC_SUBSCRIBE\n"));
265 } else if ((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_FOLLOWUP) {
266 WL_TRACE(("> Service control type: NAN_SC_FOLLOWUP\n"));
267 }
268 offset = sizeof(*nan_svc_desc_attr);
269 if (offset > len) {
270 WL_ERR(("Invalid event buffer len\n"));
271 ret = BCME_BUFTOOSHORT;
272 goto fail;
273 }
274 p_attr += offset;
275 len -= offset;
276
277 /*
278 * optional fields:
279 * must be in order following by service descriptor attribute format
280 */
281
282 /* binding bitmap */
283 if (svc_control & NAN_SC_BINDING_BITMAP_PRESENT) {
284 uint16 bitmap = 0;
285 WL_TRACE(("> svc_control: binding bitmap present\n"));
286
287 /* Copy binding bitmap */
288 memcpy(&bitmap, p_attr, NAN_BINDING_BITMAP_LEN);
289 WL_TRACE(("> sc binding bitmap: 0x%04x\n", bitmap));
290
291 if (NAN_BINDING_BITMAP_LEN > len) {
292 WL_ERR(("Invalid event buffer len\n"));
293 ret = BCME_BUFTOOSHORT;
294 goto fail;
295 }
296 p_attr += NAN_BINDING_BITMAP_LEN;
297 len -= NAN_BINDING_BITMAP_LEN;
298 }
299
300 /* matching filter */
301 if (svc_control & NAN_SC_MATCHING_FILTER_PRESENT) {
302 WL_TRACE(("> svc_control: matching filter present\n"));
303
304 tlv_data->tx_match_filter.dlen = *p_attr++;
305 WL_TRACE(("> matching filter len: 0x%02x\n",
306 tlv_data->tx_match_filter.dlen));
307
308 if (!tlv_data->tx_match_filter.dlen ||
309 tlv_data->tx_match_filter.dlen > MAX_MATCH_FILTER_LEN) {
310 tlv_data->tx_match_filter.dlen = 0;
311 WL_ERR(("tx match filter length is invalid\n"));
312 ret = -EINVAL;
313 goto fail;
314 }
315 tlv_data->tx_match_filter.data =
316 MALLOCZ(osh, tlv_data->tx_match_filter.dlen);
317 if (!tlv_data->tx_match_filter.data) {
318 WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
319 tlv_data->tx_match_filter.dlen = 0;
320 ret = -ENOMEM;
321 goto fail;
322 }
323 memcpy(tlv_data->tx_match_filter.data, p_attr,
324 tlv_data->tx_match_filter.dlen);
325
326 /* advance read pointer */
327 offset = tlv_data->tx_match_filter.dlen;
328 if (offset > len) {
329 WL_ERR(("Invalid event buffer\n"));
330 ret = BCME_BUFTOOSHORT;
331 goto fail;
332 }
333 p_attr += offset;
334 len -= offset;
335 }
336
337 /* service response filter */
338 if (svc_control & NAN_SC_SR_FILTER_PRESENT) {
339 WL_TRACE(("> svc_control: service response filter present\n"));
340
341 tlv_data->rx_match_filter.dlen = *p_attr++;
342 WL_TRACE(("> sr match filter len: 0x%02x\n",
343 tlv_data->rx_match_filter.dlen));
344
345 if (!tlv_data->rx_match_filter.dlen ||
346 tlv_data->rx_match_filter.dlen > MAX_MATCH_FILTER_LEN) {
347 tlv_data->rx_match_filter.dlen = 0;
348 WL_ERR(("%s: sr matching filter length is invalid\n",
349 __FUNCTION__));
350 ret = BCME_BADLEN;
351 goto fail;
352 }
353 tlv_data->rx_match_filter.data =
354 MALLOCZ(osh, tlv_data->rx_match_filter.dlen);
355 if (!tlv_data->rx_match_filter.data) {
356 WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
357 tlv_data->rx_match_filter.dlen = 0;
358 ret = BCME_NOMEM;
359 goto fail;
360 }
361
362 memcpy(tlv_data->rx_match_filter.data, p_attr,
363 tlv_data->rx_match_filter.dlen);
364
365 /* advance read pointer */
366 offset = tlv_data->rx_match_filter.dlen;
367 if (offset > len) {
368 WL_ERR(("Invalid event buffer len\n"));
369 ret = BCME_BUFTOOSHORT;
370 goto fail;
371 }
372 p_attr += offset;
373 len -= offset;
374 }
375
376 /* service specific info */
377 if (svc_control & NAN_SC_SVC_INFO_PRESENT) {
378 WL_TRACE(("> svc_control: svc specific info present\n"));
379
380 tlv_data->svc_info.dlen = *p_attr++;
381 WL_TRACE(("> svc info len: 0x%02x\n", tlv_data->svc_info.dlen));
382
383 if (!tlv_data->svc_info.dlen ||
384 tlv_data->svc_info.dlen > NAN_MAX_SERVICE_SPECIFIC_INFO_LEN) {
385 /* must be able to handle null msg which is not error */
386 tlv_data->svc_info.dlen = 0;
387 WL_ERR(("data length is invalid\n"));
388 ret = BCME_BADLEN;
389 goto fail;
390 }
391
392 if (tlv_data->svc_info.dlen > 0) {
393 tlv_data->svc_info.data =
394 MALLOCZ(osh, tlv_data->svc_info.dlen);
395 if (!tlv_data->svc_info.data) {
396 WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
397 tlv_data->svc_info.dlen = 0;
398 ret = BCME_NOMEM;
399 goto fail;
400 }
401 memcpy(tlv_data->svc_info.data, p_attr, tlv_data->svc_info.dlen);
402
403 /* advance read pointer */
404 offset = tlv_data->svc_info.dlen;
405 if (offset > len) {
406 WL_ERR(("Invalid event buffer len\n"));
407 ret = BCME_BUFTOOSHORT;
408 goto fail;
409 }
410 p_attr += offset;
411 len -= offset;
412 } else {
413 /* must be able to handle null msg which is not error */
414 tlv_data->svc_info.dlen = 0;
415 WL_TRACE(("%s: svc info length is zero, null info data\n",
416 __FUNCTION__));
417 }
418 }
419
420 /*
421 * discovery range limited:
422 * If set to 1, the pub/sub msg is limited in range to close proximity.
423 * If set to 0, the pub/sub msg is not limited in range.
424 * Valid only when the message is either of a publish or a sub.
425 */
426 if (svc_control & NAN_SC_RANGE_LIMITED) {
427 if (((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_PUBLISH) ||
428 ((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_SUBSCRIBE)) {
429 WL_TRACE(("> svc_control: range limited present\n"));
430 } else {
431 WL_TRACE(("range limited is only valid on pub or sub\n"));
432 }
433
434 /* TODO: send up */
435
436 /* advance read pointer */
437 p_attr++;
438 }
439 return ret;
440 fail:
441 if (tlv_data->tx_match_filter.data) {
442 MFREE(osh, tlv_data->tx_match_filter.data,
443 tlv_data->tx_match_filter.dlen);
444 tlv_data->tx_match_filter.data = NULL;
445 }
446 if (tlv_data->rx_match_filter.data) {
447 MFREE(osh, tlv_data->rx_match_filter.data,
448 tlv_data->rx_match_filter.dlen);
449 tlv_data->rx_match_filter.data = NULL;
450 }
451 if (tlv_data->svc_info.data) {
452 MFREE(osh, tlv_data->svc_info.data,
453 tlv_data->svc_info.dlen);
454 tlv_data->svc_info.data = NULL;
455 }
456
457 WL_DBG(("Parse SDA event data, status = %d\n", ret));
458 return ret;
459 }
460
/* Dispatch parsing of a service-discovery event payload into tlv_data,
 * based on the XTLV `type`:
 *   - WL_NAN_XTLV_SD_DISC_RESULTS: discovery result header, then a list of
 *     attr_num attributes (SDA and/or SDEA) parsed in a loop.
 *   - WL_NAN_XTLV_SD_FUP_RECEIVED: follow-up receive header, then the same
 *     attribute loop, with parse errors propagated.
 *   - WL_NAN_XTLV_SD_SDF_RX: raw SDF; only the public action frame header
 *     is traced and skipped here.
 *   - WL_NAN_XTLV_SD_REPLIED: replied-event header, then a single SDA.
 * Every pointer advance is preceded by an offset-vs-len check; on a short
 * buffer BCME_BUFTOOSHORT is returned.
 */
static s32
wl_cfgnan_parse_sd_attr_data(osl_t *osh, uint16 len, const uint8 *data,
	nan_event_data_t *tlv_data, uint16 type) {
	const uint8 *p_attr = data;
	uint16 offset = 0;
	s32 ret = BCME_OK;
	const wl_nan_event_disc_result_t *ev_disc = NULL;
	const wl_nan_event_replied_t *ev_replied = NULL;
	const wl_nan_ev_receive_t *ev_fup = NULL;

	/*
	 * Mapping wifi_nan_svc_descriptor_attr_t, and svc controls are optional.
	 */
	if (type == WL_NAN_XTLV_SD_DISC_RESULTS) {
		u8 iter;
		ev_disc = (const wl_nan_event_disc_result_t *)p_attr;

		WL_DBG((">> WL_NAN_XTLV_RESULTS: Discovery result\n"));

		/* Copy the fixed-header fields up to the caller's holder. */
		tlv_data->pub_id = (wl_nan_instance_id_t)ev_disc->pub_id;
		tlv_data->sub_id = (wl_nan_instance_id_t)ev_disc->sub_id;
		tlv_data->publish_rssi = ev_disc->publish_rssi;
		memcpy(&tlv_data->remote_nmi, &ev_disc->pub_mac, ETHER_ADDR_LEN);

		WL_TRACE(("publish id: %d\n", ev_disc->pub_id));
		WL_TRACE(("subscribe d: %d\n", ev_disc->sub_id));
		WL_TRACE(("publish mac addr: " MACDBG "\n",
			MAC2STRDBG(ev_disc->pub_mac.octet)));
		WL_TRACE(("publish rssi: %d\n", (int8)ev_disc->publish_rssi));
		WL_TRACE(("attribute no: %d\n", ev_disc->attr_num));
		WL_TRACE(("attribute len: %d\n", (uint16)ev_disc->attr_list_len));

		/* advance to the service descricptor */
		offset = OFFSETOF(wl_nan_event_disc_result_t, attr_list[0]);
		if (offset > len) {
			WL_ERR(("Invalid event buffer len\n"));
			ret = BCME_BUFTOOSHORT;
			goto fail;
		}
		p_attr += offset;
		len -= offset;

		/* Walk the attribute list: first byte of each entry is the
		 * attribute ID, bytes [1..2] its little-endian body length.
		 */
		iter = ev_disc->attr_num;
		while (iter) {
			if ((uint8)*p_attr == NAN_ATTR_SVC_DESCRIPTOR) {
				WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
				/* NOTE(review): unlike the FUP branch below, a
				 * parse failure here is not checked; ret may be
				 * overwritten by a later iteration — confirm
				 * whether that is intentional best-effort.
				 */
				ret = wl_cfgnan_parse_sda_data(osh, p_attr, len, tlv_data);
			}

			if ((uint8)*p_attr == NAN_ATTR_SVC_DESC_EXTENSION) {
				WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
				ret = wl_cfgnan_parse_sdea_data(osh, p_attr, len, tlv_data);
			}
			/* Skip ID byte + length field + body to the next attr. */
			offset = (sizeof(*p_attr) +
				sizeof(ev_disc->attr_list_len) +
				(p_attr[1] | (p_attr[2] << 8)));
			if (offset > len) {
				WL_ERR(("Invalid event buffer len\n"));
				ret = BCME_BUFTOOSHORT;
				goto fail;
			}
			p_attr += offset;
			len -= offset;
			iter--;
		}
	} else if (type == WL_NAN_XTLV_SD_FUP_RECEIVED) {
		uint8 iter;
		ev_fup = (const wl_nan_ev_receive_t *)p_attr;

		WL_TRACE((">> WL_NAN_XTLV_SD_FUP_RECEIVED: Transmit follow-up\n"));

		/* Copy the fixed-header fields up to the caller's holder. */
		tlv_data->local_inst_id = (wl_nan_instance_id_t)ev_fup->local_id;
		tlv_data->requestor_id = (wl_nan_instance_id_t)ev_fup->remote_id;
		tlv_data->fup_rssi = ev_fup->fup_rssi;
		memcpy(&tlv_data->remote_nmi, &ev_fup->remote_addr, ETHER_ADDR_LEN);

		WL_TRACE(("local id: %d\n", ev_fup->local_id));
		WL_TRACE(("remote id: %d\n", ev_fup->remote_id));
		WL_TRACE(("peer mac addr: " MACDBG "\n",
			MAC2STRDBG(ev_fup->remote_addr.octet)));
		WL_TRACE(("peer rssi: %d\n", (int8)ev_fup->fup_rssi));
		WL_TRACE(("attribute no: %d\n", ev_fup->attr_num));
		WL_TRACE(("attribute len: %d\n", ev_fup->attr_list_len));

		/* advance to the service descriptor which is attr_list[0] */
		offset = OFFSETOF(wl_nan_ev_receive_t, attr_list[0]);
		if (offset > len) {
			WL_ERR(("Invalid event buffer len\n"));
			ret = BCME_BUFTOOSHORT;
			goto fail;
		}
		p_attr += offset;
		len -= offset;

		/* Same attribute walk as above, but here parse failures abort. */
		iter = ev_fup->attr_num;
		while (iter) {
			if ((uint8)*p_attr == NAN_ATTR_SVC_DESCRIPTOR) {
				WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
				ret = wl_cfgnan_parse_sda_data(osh, p_attr, len, tlv_data);
				if (unlikely(ret)) {
					WL_ERR(("wl_cfgnan_parse_sda_data failed,"
						"error = %d \n", ret));
					goto fail;
				}
			}

			if ((uint8)*p_attr == NAN_ATTR_SVC_DESC_EXTENSION) {
				WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
				ret = wl_cfgnan_parse_sdea_data(osh, p_attr, len, tlv_data);
				if (unlikely(ret)) {
					WL_ERR(("wl_cfgnan_parse_sdea_data failed,"
						"error = %d \n", ret));
					goto fail;
				}
			}
			/* Skip ID byte + length field + body to the next attr. */
			offset = (sizeof(*p_attr) +
				sizeof(ev_fup->attr_list_len) +
				(p_attr[1] | (p_attr[2] << 8)));
			if (offset > len) {
				WL_ERR(("Invalid event buffer len\n"));
				ret = BCME_BUFTOOSHORT;
				goto fail;
			}
			p_attr += offset;
			len -= offset;
			iter--;
		}
	} else if (type == WL_NAN_XTLV_SD_SDF_RX) {
		/*
		 * SDF followed by nan2_pub_act_frame_t and wifi_nan_svc_descriptor_attr_t,
		 * and svc controls are optional.
		 */
		const nan2_pub_act_frame_t *nan_pub_af =
			(const nan2_pub_act_frame_t *)p_attr;

		WL_TRACE((">> WL_NAN_XTLV_SD_SDF_RX\n"));

		/* nan2_pub_act_frame_t */
		WL_TRACE(("pub category: 0x%02x\n", nan_pub_af->category_id));
		WL_TRACE(("pub action: 0x%02x\n", nan_pub_af->action_field));
		WL_TRACE(("nan oui: %2x-%2x-%2x\n",
			nan_pub_af->oui[0], nan_pub_af->oui[1], nan_pub_af->oui[2]));
		WL_TRACE(("oui type: 0x%02x\n", nan_pub_af->oui_type));
		WL_TRACE(("oui subtype: 0x%02x\n", nan_pub_af->oui_sub_type));

		/* Only the action-frame header is consumed; the trailing SDA
		 * is left unparsed in this branch.
		 */
		offset = sizeof(*nan_pub_af);
		if (offset > len) {
			WL_ERR(("Invalid event buffer len\n"));
			ret = BCME_BUFTOOSHORT;
			goto fail;
		}
		p_attr += offset;
		len -= offset;
	} else if (type == WL_NAN_XTLV_SD_REPLIED) {
		ev_replied = (const wl_nan_event_replied_t *)p_attr;

		WL_TRACE((">> WL_NAN_XTLV_SD_REPLIED: Replied Event\n"));

		/* Copy the fixed-header fields up to the caller's holder. */
		tlv_data->pub_id = (wl_nan_instance_id_t)ev_replied->pub_id;
		tlv_data->sub_id = (wl_nan_instance_id_t)ev_replied->sub_id;
		tlv_data->sub_rssi = ev_replied->sub_rssi;
		memcpy(&tlv_data->remote_nmi, &ev_replied->sub_mac, ETHER_ADDR_LEN);

		WL_TRACE(("publish id: %d\n", ev_replied->pub_id));
		WL_TRACE(("subscribe d: %d\n", ev_replied->sub_id));
		WL_TRACE(("Subscriber mac addr: " MACDBG "\n",
			MAC2STRDBG(ev_replied->sub_mac.octet)));
		WL_TRACE(("subscribe rssi: %d\n", (int8)ev_replied->sub_rssi));
		WL_TRACE(("attribute no: %d\n", ev_replied->attr_num));
		WL_TRACE(("attribute len: %d\n", (uint16)ev_replied->attr_list_len));

		/* advance to the service descriptor which is attr_list[0] */
		offset = OFFSETOF(wl_nan_event_replied_t, attr_list[0]);
		if (offset > len) {
			WL_ERR(("Invalid event buffer len\n"));
			ret = BCME_BUFTOOSHORT;
			goto fail;
		}
		p_attr += offset;
		len -= offset;
		ret = wl_cfgnan_parse_sda_data(osh, p_attr, len, tlv_data);
	}

fail:
	return ret;
}
647
648 /* Based on each case of tlv type id, fill into tlv data */
649 int
650 wl_cfgnan_set_vars_cbfn(void *ctx, const uint8 *data, uint16 type, uint16 len)
651 {
652 nan_parse_event_ctx_t *ctx_tlv_data = ((nan_parse_event_ctx_t *)(ctx));
653 nan_event_data_t *tlv_data = ((nan_event_data_t *)(ctx_tlv_data->nan_evt_data));
654 int ret = BCME_OK;
655
656 NAN_DBG_ENTER();
657 if (!data || !len) {
658 WL_ERR(("data length is invalid\n"));
659 ret = BCME_ERROR;
660 goto fail;
661 }
662
663 switch (type) {
664 /*
665 * Need to parse service descript attributes including service control,
666 * when Follow up or Discovery result come
667 */
668 case WL_NAN_XTLV_SD_FUP_RECEIVED:
669 case WL_NAN_XTLV_SD_DISC_RESULTS: {
670 ret = wl_cfgnan_parse_sd_attr_data(ctx_tlv_data->cfg->osh,
671 len, data, tlv_data, type);
672 break;
673 }
674 case WL_NAN_XTLV_SD_SVC_INFO: {
675 tlv_data->svc_info.data =
676 MALLOCZ(ctx_tlv_data->cfg->osh, len);
677 if (!tlv_data->svc_info.data) {
678 WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
679 tlv_data->svc_info.dlen = 0;
680 ret = BCME_NOMEM;
681 goto fail;
682 }
683 tlv_data->svc_info.dlen = len;
684 memcpy(tlv_data->svc_info.data, data, tlv_data->svc_info.dlen);
685 break;
686 }
687 default:
688 WL_ERR(("Not available for tlv type = 0x%x\n", type));
689 ret = BCME_ERROR;
690 break;
691 }
692 fail:
693 NAN_DBG_EXIT();
694 return ret;
695 }
696
697 int
698 wl_cfg_nan_check_cmd_len(uint16 nan_iov_len, uint16 data_size,
699 uint16 *subcmd_len)
700 {
701 s32 ret = BCME_OK;
702
703 if (subcmd_len != NULL) {
704 *subcmd_len = OFFSETOF(bcm_iov_batch_subcmd_t, data) +
705 ALIGN_SIZE(data_size, 4);
706 if (*subcmd_len > nan_iov_len) {
707 WL_ERR(("%s: Buf short, requested:%d, available:%d\n",
708 __FUNCTION__, *subcmd_len, nan_iov_len));
709 ret = BCME_NOMEM;
710 }
711 } else {
712 WL_ERR(("Invalid subcmd_len\n"));
713 ret = BCME_ERROR;
714 }
715 return ret;
716 }
717
/* Configure which NAN firmware events are delivered to the host.
 *
 * Enables the WLC_E_NAN event class, then builds a one-subcommand iov batch
 * for WL_NAN_CMD_CFG_EVENT_MASK. Three configurations are possible:
 *   - disable_events: only WL_NAN_EVENT_STOP remains enabled;
 *   - event_ind_flag set: the current mask is read back from firmware first
 *     and adjusted per the framework's indication flags;
 *   - otherwise: a fixed default set of events is enabled.
 * Finally the mask is written with a set ioctl.
 *
 * @param ndev            Net device to issue ioctls on.
 * @param cfg             cfg80211 driver context.
 * @param event_ind_flag  Framework indication-bit flags (0 = use defaults).
 * @param disable_events  True to mask off everything except the stop event.
 * @return BCME_OK on success, error code otherwise.
 */
int
wl_cfgnan_config_eventmask(struct net_device *ndev, struct bcm_cfg80211 *cfg,
	uint8 event_ind_flag, bool disable_events)
{
	bcm_iov_batch_buf_t *nan_buf = NULL;
	s32 ret = BCME_OK;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	uint16 subcmd_len;
	uint32 event_mask = 0;
	uint32 status;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];

	dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);

	NAN_DBG_ENTER();

	/* Make sure WLC_E_NAN events reach the host at all. */
	ret = wl_add_remove_eventmsg(ndev, WLC_E_NAN, true);
	if (unlikely(ret)) {
		WL_ERR((" nan event enable failed, error = %d \n", ret));
		goto fail;
	}

	nan_buf = MALLOCZ(dhdp->osh, nan_buf_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	/* nan_buf_size now tracks the space remaining after the batch header */
	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
	sub_cmd = (bcm_iov_batch_subcmd_t*)(uint8 *)(&nan_buf->cmds[0]);

	ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
		sizeof(event_mask), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		goto fail;
	}

	sub_cmd->id = htod16(WL_NAN_CMD_CFG_EVENT_MASK);
	/* NOTE(review): unlike sub_cmd->id, this length is not endian-
	 * converted (no htod16) — confirm whether the firmware expects it
	 * in host order here.
	 */
	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(event_mask);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	nan_buf_size -= subcmd_len;
	nan_buf->count = 1;

	if (disable_events) {
		WL_DBG(("Disabling all nan events..except stop event\n"));
		event_mask = NAN_EVENT_BIT(WL_NAN_EVENT_STOP);
	} else {
		/*
		 * Android framework event mask configuration.
		 */
		if (event_ind_flag) {
			/* Read the current mask from firmware with a get ioctl
			 * using the same batch buffer, then adjust it.
			 */
			nan_buf->is_set = false;
			memset(resp_buf, 0, sizeof(resp_buf));
			ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
				(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
			if (unlikely(ret) || unlikely(status)) {
				WL_ERR(("get nan event mask failed ret %d status %d \n",
					ret, status));
				goto fail;
			}
			sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];

			/* check the response buff */
			/* NOTE(review): only the first byte of the returned
			 * mask is read here (uint8 deref of a uint32 field) —
			 * confirm this is intentional.
			 */
			event_mask = (*(uint8*)&sub_cmd_resp->data[0]);

			if (CHECK_BIT(event_ind_flag, WL_NAN_EVENT_DIC_MAC_ADDR_BIT)) {
				WL_DBG(("Need to add disc mac addr change event\n"));
			}
			/* BIT2 - Disable nan cluster join indication (OTA). */
			if (CHECK_BIT(event_ind_flag, WL_NAN_EVENT_JOIN_EVENT)) {
				event_mask &= ~NAN_EVENT_BIT(WL_NAN_EVENT_MERGE);
			}
		} else {
			/* enable only selected nan events to avoid unnecessary host wake up */
			event_mask |= NAN_EVENT_BIT(WL_NAN_EVENT_START);
			event_mask |= NAN_EVENT_BIT(WL_NAN_EVENT_MERGE);
		}

		/* Events the host always wants, regardless of framework flags. */
		event_mask |= NAN_EVENT_BIT(WL_NAN_EVENT_DISCOVERY_RESULT);
		event_mask |= NAN_EVENT_BIT(WL_NAN_EVENT_RECEIVE);
		event_mask |= NAN_EVENT_BIT(WL_NAN_EVENT_TERMINATED);
		event_mask |= NAN_EVENT_BIT(WL_NAN_EVENT_STOP);
		event_mask |= NAN_EVENT_BIT(WL_NAN_EVENT_TXS);
		event_mask |= NAN_EVENT_BIT(WL_NAN_EVENT_PEER_DATAPATH_IND);
		event_mask |= NAN_EVENT_BIT(WL_NAN_EVENT_DATAPATH_ESTB);
		event_mask |= NAN_EVENT_BIT(WL_NAN_EVENT_DATAPATH_END);
		event_mask |= NAN_EVENT_BIT(WL_NAN_EVENT_RNG_RPT_IND);
		event_mask |= NAN_EVENT_BIT(WL_NAN_EVENT_RNG_REQ_IND);
		event_mask |= NAN_EVENT_BIT(WL_NAN_EVENT_RNG_TERM_IND);
	}

	/* Write the final mask back to firmware. */
	nan_buf->is_set = true;
	memcpy(sub_cmd->data, &event_mask, sizeof(event_mask));
	/* Convert "space remaining" back into "bytes used" for the ioctl. */
	nan_buf_size = (NAN_IOCTL_BUF_SIZE - nan_buf_size);
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
		(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(status)) {
		WL_ERR(("set nan event mask failed ret %d status %d \n", ret, status));
		goto fail;
	}
	WL_DBG(("set nan event mask successfull\n"));

fail:
	if (nan_buf) {
		MFREE(dhdp->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}
	NAN_DBG_EXIT();
	return ret;
}
833
834 static int
835 wl_cfgnan_set_nan_avail(struct net_device *ndev,
836 struct bcm_cfg80211 *cfg, nan_avail_cmd_data *cmd_data, uint8 avail_type)
837 {
838 bcm_iov_batch_buf_t *nan_buf = NULL;
839 s32 ret = BCME_OK;
840 uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
841 uint16 subcmd_len;
842 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
843 wl_nan_iov_t *nan_iov_data = NULL;
844 wl_avail_t *avail = NULL;
845 wl_avail_entry_t *entry; /* used for filling entry structure */
846 uint8 *p; /* tracking pointer */
847 uint8 i;
848 u32 status;
849 int c;
850 char ndc_id[ETHER_ADDR_LEN] = { 0x50, 0x6f, 0x9a, 0x01, 0x0, 0x0 };
851 dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
852 char *a = WL_AVAIL_BIT_MAP;
853 uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
854
855 NAN_DBG_ENTER();
856
857 /* Do not disturb avail if dam is supported */
858 if (FW_SUPPORTED(dhdp, autodam)) {
859 WL_DBG(("DAM is supported, avail modification not allowed\n"));
860 return ret;
861 }
862
863 if (avail_type < WL_AVAIL_LOCAL || avail_type > WL_AVAIL_TYPE_MAX) {
864 WL_ERR(("Invalid availability type\n"));
865 ret = BCME_USAGE_ERROR;
866 goto fail;
867 }
868
869 nan_buf = MALLOCZ(dhdp->osh, nan_buf_size);
870 if (!nan_buf) {
871 WL_ERR(("%s: memory allocation failed\n", __func__));
872 ret = BCME_NOMEM;
873 goto fail;
874 }
875
876 nan_iov_data = MALLOCZ(dhdp->osh, sizeof(*nan_iov_data));
877 if (!nan_iov_data) {
878 WL_ERR(("%s: memory allocation failed\n", __func__));
879 ret = BCME_NOMEM;
880 goto fail;
881 }
882
883 nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
884 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
885 nan_buf->count = 0;
886 nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
887 nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
888
889 sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
890 ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
891 sizeof(*avail), &subcmd_len);
892 if (unlikely(ret)) {
893 WL_ERR(("nan_sub_cmd check failed\n"));
894 goto fail;
895 }
896 avail = (wl_avail_t *)sub_cmd->data;
897
898 /* populate wl_avail_type */
899 avail->flags = avail_type;
900 if (avail_type == WL_AVAIL_RANGING) {
901 memcpy(&avail->addr, &cmd_data->peer_nmi, ETHER_ADDR_LEN);
902 }
903
904 sub_cmd->len = sizeof(sub_cmd->u.options) + subcmd_len;
905 sub_cmd->id = htod16(WL_NAN_CMD_CFG_AVAIL);
906 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
907
908 nan_buf->is_set = false;
909 nan_buf->count++;
910 nan_iov_data->nan_iov_len -= subcmd_len;
911 nan_buf_size = (NAN_IOCTL_BUF_SIZE - nan_iov_data->nan_iov_len);
912
913 WL_TRACE(("Read wl nan avail status\n"));
914 memset(resp_buf, 0, sizeof(resp_buf));
915 ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
916 (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
917 if (unlikely(ret)) {
918 WL_ERR(("\n Get nan avail failed ret %d, status %d \n", ret, status));
919 goto fail;
920 }
921
922 if (status == BCME_NOTFOUND) {
923 nan_buf->count = 0;
924 nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
925 nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
926
927 sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
928
929 avail = (wl_avail_t *)sub_cmd->data;
930 p = avail->entry;
931
932 /* populate wl_avail fields */
933 avail->length = OFFSETOF(wl_avail_t, entry);
934 avail->flags = avail_type;
935 avail->num_entries = 0;
936 avail->id = 0;
937 entry = (wl_avail_entry_t*)p;
938 entry->flags = WL_AVAIL_ENTRY_COM;
939
940 /* set default values for optional parameters */
941 entry->start_offset = 0;
942 entry->u.band = 0;
943
944 if (cmd_data->avail_period) {
945 entry->period = cmd_data->avail_period;
946 } else {
947 entry->period = WL_AVAIL_PERIOD_1024;
948 }
949
950 if (cmd_data->duration != NAN_BAND_INVALID) {
951 entry->flags |= (3 << WL_AVAIL_ENTRY_USAGE_SHIFT) |
952 (cmd_data->duration << WL_AVAIL_ENTRY_BIT_DUR_SHIFT);
953 } else {
954 entry->flags |= (3 << WL_AVAIL_ENTRY_USAGE_SHIFT) |
955 (WL_AVAIL_BIT_DUR_16 << WL_AVAIL_ENTRY_BIT_DUR_SHIFT);
956 }
957 entry->bitmap_len = 0;
958
959 if (avail_type == WL_AVAIL_LOCAL) {
960 entry->flags |= 1 << WL_AVAIL_ENTRY_CHAN_SHIFT;
961 /* Check for 5g support, based on that choose 5g channel */
962 if (cfg->support_5g) {
963 entry->u.channel_info =
964 htod32(wf_channel2chspec(WL_AVAIL_CHANNEL_5G,
965 WL_AVAIL_BANDWIDTH_5G));
966 } else {
967 entry->u.channel_info =
968 htod32(wf_channel2chspec(WL_AVAIL_CHANNEL_2G,
969 WL_AVAIL_BANDWIDTH_2G));
970 }
971 entry->flags = htod16(entry->flags);
972 }
973
974 if (cfg->support_5g) {
975 a = WL_5G_AVAIL_BIT_MAP;
976 }
977
978 /* point to bitmap value for processing */
979 if (cmd_data->bmap) {
980 for (c = (WL_NAN_EVENT_CLEAR_BIT-1); c >= 0; c--) {
981 i = cmd_data->bmap >> c;
982 if (i & 1) {
983 setbit(entry->bitmap, (WL_NAN_EVENT_CLEAR_BIT-c-1));
984 }
985 }
986 } else {
987 for (i = 0; i < strlen(WL_AVAIL_BIT_MAP); i++) {
988 if (*a == '1') {
989 setbit(entry->bitmap, i);
990 }
991 a++;
992 }
993 }
994
995 /* account for partially filled most significant byte */
996 entry->bitmap_len = ((WL_NAN_EVENT_CLEAR_BIT) + NBBY - 1) / NBBY;
997 if (avail_type == WL_AVAIL_NDC) {
998 memcpy(&avail->addr, ndc_id, ETHER_ADDR_LEN);
999 } else if (avail_type == WL_AVAIL_RANGING) {
1000 memcpy(&avail->addr, &cmd_data->peer_nmi, ETHER_ADDR_LEN);
1001 }
1002 /* account for partially filled most significant byte */
1003
1004 /* update wl_avail and populate wl_avail_entry */
1005 entry->length = OFFSETOF(wl_avail_entry_t, bitmap) + entry->bitmap_len;
1006 avail->num_entries++;
1007 avail->length += entry->length;
1008 /* advance pointer for next entry */
1009 p += entry->length;
1010
1011 /* convert to dongle endianness */
1012 entry->length = htod16(entry->length);
1013 entry->start_offset = htod16(entry->start_offset);
1014 entry->u.channel_info = htod32(entry->u.channel_info);
1015 entry->flags = htod16(entry->flags);
1016 /* update avail_len only if
1017 * there are avail entries
1018 */
1019 if (avail->num_entries) {
1020 nan_iov_data->nan_iov_len -= avail->length;
1021 avail->length = htod16(avail->length);
1022 avail->flags = htod16(avail->flags);
1023 }
1024 avail->length = htod16(avail->length);
1025
1026 sub_cmd->id = htod16(WL_NAN_CMD_CFG_AVAIL);
1027 sub_cmd->len = sizeof(sub_cmd->u.options) + avail->length;
1028 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
1029
1030 nan_buf->is_set = true;
1031 nan_buf->count++;
1032
1033 /* Reduce the iov_len size by subcmd_len */
1034 nan_iov_data->nan_iov_len -= subcmd_len;
1035 nan_buf_size = (NAN_IOCTL_BUF_SIZE - nan_iov_data->nan_iov_len);
1036
1037 ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
1038 (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
1039 if (unlikely(ret) || unlikely(status)) {
1040 WL_ERR(("\n set nan avail failed ret %d status %d \n", ret, status));
1041 ret = status;
1042 goto fail;
1043 }
1044 } else if (status == BCME_OK) {
1045 WL_DBG(("Avail type [%d] found to be configured\n", avail_type));
1046 } else {
1047 WL_ERR(("set nan avail failed ret %d status %d \n", ret, status));
1048 }
1049
1050 fail:
1051 if (nan_buf) {
1052 MFREE(dhdp->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
1053 }
1054 if (nan_iov_data) {
1055 MFREE(dhdp->osh, nan_iov_data, sizeof(*nan_iov_data));
1056 }
1057
1058 NAN_DBG_EXIT();
1059 return ret;
1060 }
1061
/*
 * Read-modify-write one bit of the firmware NAN config control word.
 *
 * Builds a single-subcommand batch iovar, first issued as a "get" to fetch
 * the current WL_NAN_CMD_CFG_NAN_CONFIG value, then reissued over the same
 * buffer as a "set" with 'flag' set (set==true) or cleared (set==false).
 *
 * @param ndev/cfg  interface and driver context for the ioctls
 * @param flag      bitmask to set or clear in the control word
 * @param status    out: firmware status of the last executed ioctl
 * @param set       true to set the flag, false to clear it
 * @return BCME_OK on success, BCME_NOMEM or an ioctl/command error otherwise
 */
static int
wl_cfgnan_config_control_flag(struct net_device *ndev, struct bcm_cfg80211 *cfg,
	uint32 flag, uint32 *status, bool set)
{
	bcm_iov_batch_buf_t *nan_buf = NULL;
	s32 ret = BCME_OK;
	uint16 nan_iov_start, nan_iov_end;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	uint16 subcmd_len;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
	wl_nan_iov_t *nan_iov_data = NULL;
	uint32 cfg_ctrl;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];

	dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);

	NAN_DBG_ENTER();
	WL_INFORM_MEM(("%s: Modifying nan ctrl flag %x val %d",
		__FUNCTION__, flag, set));
	nan_buf = MALLOCZ(dhdp->osh, nan_buf_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	nan_iov_data = MALLOCZ(dhdp->osh, sizeof(*nan_iov_data));
	if (!nan_iov_data) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	/* Lay out the batch header and point the iov cursor at cmds[0] */
	nan_iov_data->nan_iov_len = nan_iov_start = NAN_IOCTL_BUF_SIZE;
	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
	nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);

	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
		sizeof(cfg_ctrl), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		goto fail;
	}

	sub_cmd->id = htod16(WL_NAN_CMD_CFG_NAN_CONFIG);
	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(cfg_ctrl);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);

	/* First pass: "get" the current control word */
	nan_buf->is_set = false;
	nan_buf->count++;

	/* Reduce the iov_len size by subcmd_len */
	nan_iov_data->nan_iov_len -= subcmd_len;
	nan_iov_end = nan_iov_data->nan_iov_len;
	nan_buf_size = (nan_iov_start - nan_iov_end);

	memset(resp_buf, 0, sizeof(resp_buf));
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, status,
		(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(*status)) {
		WL_ERR(("get nan cfg ctrl failed ret %d status %d \n", ret, *status));
		goto fail;
	}
	sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];

	/* check the response buff: first 4 bytes of data carry the word */
	cfg_ctrl = (*(uint32 *)&sub_cmd_resp->data[0]);
	if (set) {
		cfg_ctrl |= flag;
	} else {
		cfg_ctrl &= ~flag;
	}
	/* Second pass: reuse the same request buffer as a "set" */
	memcpy(sub_cmd->data, &cfg_ctrl, sizeof(cfg_ctrl));
	nan_buf->is_set = true;
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, status,
		(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(*status)) {
		WL_ERR(("set nan cfg ctrl failed ret %d status %d \n", ret, *status));
		goto fail;
	}
	WL_DBG(("set nan cfg ctrl successfull\n"));
fail:
	if (nan_buf) {
		MFREE(dhdp->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}
	if (nan_iov_data) {
		MFREE(dhdp->osh, nan_iov_data, sizeof(*nan_iov_data));
	}

	NAN_DBG_EXIT();
	return ret;
}
1158
1159 static int
1160 wl_cfgnan_get_iovars_status(void *ctx, const uint8 *data, uint16 type, uint16 len)
1161 {
1162 bcm_iov_batch_buf_t *b_resp = (bcm_iov_batch_buf_t *)ctx;
1163 uint32 status;
1164 /* if all tlvs are parsed, we should not be here */
1165 if (b_resp->count == 0) {
1166 return BCME_BADLEN;
1167 }
1168
1169 /* cbfn params may be used in f/w */
1170 if (len < sizeof(status)) {
1171 return BCME_BUFTOOSHORT;
1172 }
1173
1174 /* first 4 bytes consists status */
1175 memcpy(&status, data, sizeof(uint32));
1176 status = dtoh32(status);
1177
1178 /* If status is non zero */
1179 if (status != BCME_OK) {
1180 printf("cmd type %d failed, status: %04x\n", type, status);
1181 goto exit;
1182 }
1183
1184 if (b_resp->count > 0) {
1185 b_resp->count--;
1186 }
1187
1188 if (!b_resp->count) {
1189 status = BCME_IOV_LAST_CMD;
1190 }
1191 exit:
1192 return status;
1193 }
1194
1195 static int
1196 wl_cfgnan_execute_ioctl(struct net_device *ndev, struct bcm_cfg80211 *cfg,
1197 bcm_iov_batch_buf_t *nan_buf, uint16 nan_buf_size, uint32 *status,
1198 uint8 *resp_buf, uint16 resp_buf_size)
1199 {
1200 int ret = BCME_OK;
1201 uint16 tlvs_len;
1202 int res = BCME_OK;
1203 bcm_iov_batch_buf_t *p_resp = NULL;
1204 char *iov = "nan";
1205 int max_resp_len = WLC_IOCTL_MAXLEN;
1206
1207 WL_DBG(("Enter:\n"));
1208 if (nan_buf->is_set) {
1209 ret = wldev_iovar_setbuf(ndev, "nan", nan_buf, nan_buf_size,
1210 resp_buf, resp_buf_size, NULL);
1211 p_resp = (bcm_iov_batch_buf_t *)(resp_buf + strlen(iov) + 1);
1212 } else {
1213 ret = wldev_iovar_getbuf(ndev, "nan", nan_buf, nan_buf_size,
1214 resp_buf, resp_buf_size, NULL);
1215 p_resp = (bcm_iov_batch_buf_t *)(resp_buf);
1216 }
1217 if (unlikely(ret)) {
1218 WL_ERR((" nan execute ioctl failed, error = %d \n", ret));
1219 goto fail;
1220 }
1221
1222 p_resp->is_set = nan_buf->is_set;
1223 tlvs_len = max_resp_len - OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
1224
1225 /* Extract the tlvs and print their resp in cb fn */
1226 res = bcm_unpack_xtlv_buf((void *)p_resp, (const uint8 *)&p_resp->cmds[0],
1227 tlvs_len, BCM_IOV_CMD_OPT_ALIGN32, wl_cfgnan_get_iovars_status);
1228
1229 if (res == BCME_IOV_LAST_CMD) {
1230 res = BCME_OK;
1231 }
1232 fail:
1233 *status = res;
1234 WL_DBG((" nan ioctl ret %d status %d \n", ret, *status));
1235 return ret;
1236
1237 }
1238
1239 static int
1240 wl_cfgnan_if_addr_handler(void *p_buf, uint16 *nan_buf_size,
1241 struct ether_addr *if_addr)
1242 {
1243 /* nan enable */
1244 s32 ret = BCME_OK;
1245 uint16 subcmd_len;
1246
1247 NAN_DBG_ENTER();
1248
1249 if (p_buf != NULL) {
1250 bcm_iov_batch_subcmd_t *sub_cmd = (bcm_iov_batch_subcmd_t*)(p_buf);
1251
1252 ret = wl_cfg_nan_check_cmd_len(*nan_buf_size,
1253 sizeof(*if_addr), &subcmd_len);
1254 if (unlikely(ret)) {
1255 WL_ERR(("nan_sub_cmd check failed\n"));
1256 goto fail;
1257 }
1258
1259 /* Fill the sub_command block */
1260 sub_cmd->id = htod16(WL_NAN_CMD_CFG_IF_ADDR);
1261 sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*if_addr);
1262 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
1263 memcpy(sub_cmd->data, (uint8 *)if_addr,
1264 sizeof(*if_addr));
1265
1266 *nan_buf_size -= subcmd_len;
1267 } else {
1268 WL_ERR(("nan_iov_buf is NULL\n"));
1269 ret = BCME_ERROR;
1270 goto fail;
1271 }
1272
1273 fail:
1274 NAN_DBG_EXIT();
1275 return ret;
1276 }
1277
/*
 * Program the NAN Management Interface (NMI) MAC address into firmware.
 * When cfg->nancfg.mac_rand is set, a random locally-administered unicast
 * address is generated; otherwise the address is derived from the primary
 * MAC via wl_get_vif_macaddr(). On success the address is cached in
 * cfg->nan_nmi_mac. Returns BCME_OK or a negative/BCME error.
 */
static int
wl_cfgnan_set_if_addr(struct bcm_cfg80211 *cfg)
{
	s32 ret = BCME_OK;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	uint32 status;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
	struct ether_addr if_addr;
	uint8 buf[NAN_IOCTL_BUF_SIZE];
	bcm_iov_batch_buf_t *nan_buf = (bcm_iov_batch_buf_t*)buf;
	bool rand_mac = cfg->nancfg.mac_rand;

	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
	if (rand_mac) {
		RANDOM_BYTES(if_addr.octet, 6);
		/* restore mcast and local admin bits to 0 and 1 */
		ETHER_SET_UNICAST(if_addr.octet);
		ETHER_SET_LOCALADDR(if_addr.octet);
	} else {
		/* Use primary MAC with the locally administered bit for the
		 * NAN NMI I/F
		 */
		if (wl_get_vif_macaddr(cfg, WL_IF_TYPE_NAN_NMI,
			if_addr.octet) != BCME_OK) {
			ret = -EINVAL;
			goto fail;
		}
	}
	WL_INFORM_MEM(("%s: NMI " MACDBG "\n",
		__FUNCTION__, MAC2STRDBG(if_addr.octet)));
	ret = wl_cfgnan_if_addr_handler(&nan_buf->cmds[0],
		&nan_buf_size, &if_addr);
	if (unlikely(ret)) {
		WL_ERR(("Nan if addr handler sub_cmd set failed\n"));
		goto fail;
	}
	nan_buf->count++;
	nan_buf->is_set = true;
	/* nan_buf_size was counted down; recover the used length */
	nan_buf_size = NAN_IOCTL_BUF_SIZE - nan_buf_size;
	memset(resp_buf, 0, sizeof(resp_buf));
	ret = wl_cfgnan_execute_ioctl(bcmcfg_to_prmry_ndev(cfg), cfg,
		nan_buf, nan_buf_size, &status,
		(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(status)) {
		WL_ERR(("nan if addr handler failed ret %d status %d\n",
			ret, status));
		goto fail;
	}
	memcpy(cfg->nan_nmi_mac, if_addr.octet, ETH_ALEN);
	return ret;
fail:
	/* NOTE(review): this path also runs when wl_get_vif_macaddr()
	 * itself failed, i.e. no address was ever handed out — confirm
	 * wl_release_vif_macaddr() tolerates releasing in that state.
	 */
	if (!rand_mac) {
		wl_release_vif_macaddr(cfg, if_addr.octet, WL_IF_TYPE_NAN_NMI);
	}

	return ret;
}
1337
1338 static int
1339 wl_cfgnan_init_handler(void *p_buf, uint16 *nan_buf_size, bool val)
1340 {
1341 /* nan enable */
1342 s32 ret = BCME_OK;
1343 uint16 subcmd_len;
1344
1345 NAN_DBG_ENTER();
1346
1347 if (p_buf != NULL) {
1348 bcm_iov_batch_subcmd_t *sub_cmd = (bcm_iov_batch_subcmd_t*)(p_buf);
1349
1350 ret = wl_cfg_nan_check_cmd_len(*nan_buf_size,
1351 sizeof(val), &subcmd_len);
1352 if (unlikely(ret)) {
1353 WL_ERR(("nan_sub_cmd check failed\n"));
1354 goto fail;
1355 }
1356
1357 /* Fill the sub_command block */
1358 sub_cmd->id = htod16(WL_NAN_CMD_CFG_NAN_INIT);
1359 sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(uint8);
1360 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
1361 memcpy(sub_cmd->data, (uint8*)&val, sizeof(uint8));
1362
1363 *nan_buf_size -= subcmd_len;
1364 } else {
1365 WL_ERR(("nan_iov_buf is NULL\n"));
1366 ret = BCME_ERROR;
1367 goto fail;
1368 }
1369
1370 fail:
1371 NAN_DBG_EXIT();
1372 return ret;
1373 }
1374
1375 static int
1376 wl_cfgnan_enable_handler(wl_nan_iov_t *nan_iov_data, bool val)
1377 {
1378 /* nan enable */
1379 s32 ret = BCME_OK;
1380 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
1381 uint16 subcmd_len;
1382
1383 NAN_DBG_ENTER();
1384
1385 sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
1386
1387 ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
1388 sizeof(val), &subcmd_len);
1389 if (unlikely(ret)) {
1390 WL_ERR(("nan_sub_cmd check failed\n"));
1391 return ret;
1392 }
1393
1394 /* Fill the sub_command block */
1395 sub_cmd->id = htod16(WL_NAN_CMD_CFG_NAN_ENAB);
1396 sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(uint8);
1397 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
1398 memcpy(sub_cmd->data, (uint8*)&val, sizeof(uint8));
1399
1400 nan_iov_data->nan_iov_len -= subcmd_len;
1401 nan_iov_data->nan_iov_buf += subcmd_len;
1402 NAN_DBG_EXIT();
1403 return ret;
1404 }
1405
1406 static int
1407 wl_cfgnan_warmup_time_handler(nan_config_cmd_data_t *cmd_data,
1408 wl_nan_iov_t *nan_iov_data)
1409 {
1410 /* wl nan warm_up_time */
1411 s32 ret = BCME_OK;
1412 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
1413 wl_nan_warmup_time_ticks_t *wup_ticks = NULL;
1414 uint16 subcmd_len;
1415 NAN_DBG_ENTER();
1416
1417 sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
1418 wup_ticks = (wl_nan_warmup_time_ticks_t *)sub_cmd->data;
1419
1420 ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
1421 sizeof(*wup_ticks), &subcmd_len);
1422 if (unlikely(ret)) {
1423 WL_ERR(("nan_sub_cmd check failed\n"));
1424 return ret;
1425 }
1426 /* Fill the sub_command block */
1427 sub_cmd->id = htod16(WL_NAN_CMD_CFG_WARMUP_TIME);
1428 sub_cmd->len = sizeof(sub_cmd->u.options) +
1429 sizeof(*wup_ticks);
1430 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
1431 *wup_ticks = cmd_data->warmup_time;
1432
1433 nan_iov_data->nan_iov_len -= subcmd_len;
1434 nan_iov_data->nan_iov_buf += subcmd_len;
1435
1436 NAN_DBG_EXIT();
1437 return ret;
1438 }
1439
1440 static int
1441 wl_cfgnan_set_election_metric(nan_config_cmd_data_t *cmd_data,
1442 wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
1443 {
1444 s32 ret = BCME_OK;
1445 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
1446 wl_nan_election_metric_config_t *metrics = NULL;
1447 uint16 subcmd_len;
1448 NAN_DBG_ENTER();
1449
1450 sub_cmd =
1451 (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
1452 ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
1453 sizeof(*metrics), &subcmd_len);
1454 if (unlikely(ret)) {
1455 WL_ERR(("nan_sub_cmd check failed\n"));
1456 goto fail;
1457 }
1458
1459 metrics = (wl_nan_election_metric_config_t *)sub_cmd->data;
1460
1461 if (nan_attr_mask & NAN_ATTR_RAND_FACTOR_CONFIG) {
1462 metrics->random_factor = (uint8)cmd_data->metrics.random_factor;
1463 }
1464
1465 if ((!cmd_data->metrics.master_pref) ||
1466 (cmd_data->metrics.master_pref > NAN_MAXIMUM_MASTER_PREFERENCE)) {
1467 WL_TRACE(("Master Pref is 0 or greater than 254, hence sending random value\n"));
1468 /* Master pref for mobile devices can be from 1 - 127 as per Spec AppendixC */
1469 metrics->master_pref = (RANDOM32()%(NAN_MAXIMUM_MASTER_PREFERENCE/2)) + 1;
1470 } else {
1471 metrics->master_pref = (uint8)cmd_data->metrics.master_pref;
1472 }
1473 sub_cmd->id = htod16(WL_NAN_CMD_ELECTION_METRICS_CONFIG);
1474 sub_cmd->len = sizeof(sub_cmd->u.options) +
1475 sizeof(*metrics);
1476 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
1477
1478 nan_iov_data->nan_iov_len -= subcmd_len;
1479 nan_iov_data->nan_iov_buf += subcmd_len;
1480
1481 fail:
1482 NAN_DBG_EXIT();
1483 return ret;
1484 }
1485
1486 static int
1487 wl_cfgnan_set_rssi_proximity(nan_config_cmd_data_t *cmd_data,
1488 wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
1489 {
1490 s32 ret = BCME_OK;
1491 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
1492 wl_nan_rssi_notif_thld_t *rssi_notif_thld = NULL;
1493 uint16 subcmd_len;
1494
1495 NAN_DBG_ENTER();
1496 sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
1497
1498 rssi_notif_thld = (wl_nan_rssi_notif_thld_t *)sub_cmd->data;
1499
1500 ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
1501 sizeof(*rssi_notif_thld), &subcmd_len);
1502 if (unlikely(ret)) {
1503 WL_ERR(("nan_sub_cmd check failed\n"));
1504 return ret;
1505 }
1506 if (nan_attr_mask & NAN_ATTR_RSSI_PROXIMITY_2G_CONFIG) {
1507 rssi_notif_thld->bcn_rssi_2g =
1508 cmd_data->rssi_attr.rssi_proximity_2dot4g_val;
1509 } else {
1510 /* Keeping RSSI threshold value to be -70dBm */
1511 rssi_notif_thld->bcn_rssi_2g = NAN_DEF_RSSI_NOTIF_THRESH;
1512 }
1513
1514 if (nan_attr_mask & NAN_ATTR_RSSI_PROXIMITY_5G_CONFIG) {
1515 rssi_notif_thld->bcn_rssi_5g =
1516 cmd_data->rssi_attr.rssi_proximity_5g_val;
1517 } else {
1518 /* Keeping RSSI threshold value to be -70dBm */
1519 rssi_notif_thld->bcn_rssi_5g = NAN_DEF_RSSI_NOTIF_THRESH;
1520 }
1521
1522 sub_cmd->id = htod16(WL_NAN_CMD_SYNC_BCN_RSSI_NOTIF_THRESHOLD);
1523 sub_cmd->len = htod16(sizeof(sub_cmd->u.options) + sizeof(*rssi_notif_thld));
1524 sub_cmd->u.options = htod32(BCM_XTLV_OPTION_ALIGN32);
1525
1526 nan_iov_data->nan_iov_len -= subcmd_len;
1527 nan_iov_data->nan_iov_buf += subcmd_len;
1528
1529 NAN_DBG_EXIT();
1530 return ret;
1531 }
1532
1533 static int
1534 wl_cfgnan_set_rssi_mid_or_close(nan_config_cmd_data_t *cmd_data,
1535 wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
1536 {
1537 s32 ret = BCME_OK;
1538 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
1539 wl_nan_rssi_thld_t *rssi_thld = NULL;
1540 uint16 subcmd_len;
1541
1542 NAN_DBG_ENTER();
1543 sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
1544 rssi_thld = (wl_nan_rssi_thld_t *)sub_cmd->data;
1545
1546 ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
1547 sizeof(*rssi_thld), &subcmd_len);
1548 if (unlikely(ret)) {
1549 WL_ERR(("nan_sub_cmd check failed\n"));
1550 return ret;
1551 }
1552
1553 /*
1554 * Keeping RSSI mid value -75dBm for both 2G and 5G
1555 * Keeping RSSI close value -60dBm for both 2G and 5G
1556 */
1557 if (nan_attr_mask & NAN_ATTR_RSSI_MIDDLE_2G_CONFIG) {
1558 rssi_thld->rssi_mid_2g =
1559 cmd_data->rssi_attr.rssi_middle_2dot4g_val;
1560 } else {
1561 rssi_thld->rssi_mid_2g = NAN_DEF_RSSI_MID;
1562 }
1563
1564 if (nan_attr_mask & NAN_ATTR_RSSI_MIDDLE_5G_CONFIG) {
1565 rssi_thld->rssi_mid_5g =
1566 cmd_data->rssi_attr.rssi_middle_5g_val;
1567 } else {
1568 rssi_thld->rssi_mid_5g = NAN_DEF_RSSI_MID;
1569 }
1570
1571 if (nan_attr_mask & NAN_ATTR_RSSI_CLOSE_CONFIG) {
1572 rssi_thld->rssi_close_2g =
1573 cmd_data->rssi_attr.rssi_close_2dot4g_val;
1574 } else {
1575 rssi_thld->rssi_close_2g = NAN_DEF_RSSI_CLOSE;
1576 }
1577
1578 if (nan_attr_mask & NAN_ATTR_RSSI_CLOSE_5G_CONFIG) {
1579 rssi_thld->rssi_close_5g =
1580 cmd_data->rssi_attr.rssi_close_5g_val;
1581 } else {
1582 rssi_thld->rssi_close_5g = NAN_DEF_RSSI_CLOSE;
1583 }
1584
1585 sub_cmd->id = htod16(WL_NAN_CMD_ELECTION_RSSI_THRESHOLD);
1586 sub_cmd->len = htod16(sizeof(sub_cmd->u.options) + sizeof(*rssi_thld));
1587 sub_cmd->u.options = htod32(BCM_XTLV_OPTION_ALIGN32);
1588
1589 nan_iov_data->nan_iov_len -= subcmd_len;
1590 nan_iov_data->nan_iov_buf += subcmd_len;
1591
1592 NAN_DBG_EXIT();
1593 return ret;
1594 }
1595
1596 static int
1597 check_for_valid_5gchan(struct net_device *ndev, uint8 chan)
1598 {
1599 s32 ret = BCME_OK;
1600 uint bitmap;
1601 u8 ioctl_buf[WLC_IOCTL_SMLEN];
1602 uint32 chanspec_arg;
1603 NAN_DBG_ENTER();
1604
1605 chanspec_arg = CH20MHZ_CHSPEC(chan);
1606 chanspec_arg = wl_chspec_host_to_driver(chanspec_arg);
1607 memset(ioctl_buf, 0, WLC_IOCTL_SMLEN);
1608 ret = wldev_iovar_getbuf(ndev, "per_chan_info", (void *)&chanspec_arg, sizeof(chanspec_arg),
1609 ioctl_buf, WLC_IOCTL_SMLEN, NULL);
1610 if (ret != BCME_OK) {
1611 WL_ERR(("Chaninfo for channel = %d, error %d\n", chan, ret));
1612 goto exit;
1613 }
1614
1615 bitmap = dtoh32(*(uint *)ioctl_buf);
1616 if (!(bitmap & WL_CHAN_VALID_HW)) {
1617 WL_ERR(("Invalid channel\n"));
1618 ret = BCME_BADCHAN;
1619 goto exit;
1620 }
1621
1622 if (!(bitmap & WL_CHAN_VALID_SW)) {
1623 WL_ERR(("Not supported in current locale\n"));
1624 ret = BCME_BADCHAN;
1625 goto exit;
1626 }
1627 exit:
1628 NAN_DBG_EXIT();
1629 return ret;
1630 }
1631
/*
 * Append a WL_NAN_CMD_SYNC_SOCIAL_CHAN sub-command selecting the NAN
 * social channels. The 2G channel comes from cmd_data->chanspec[1] when
 * NAN_ATTR_2G_CHAN_CONFIG is set, else the driver default. If 5G is
 * supported, the chosen 5G channel is validated against the locale;
 * on failure the secondary default is tried, and if that also fails 5G
 * is disabled (soc_chan_5g = 0) and ret is forced back to BCME_OK so
 * operation continues 2G-only.
 */
static int
wl_cfgnan_set_nan_soc_chans(struct net_device *ndev, nan_config_cmd_data_t *cmd_data,
	wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
{
	s32 ret = BCME_OK;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	wl_nan_social_channels_t *soc_chans = NULL;
	uint16 subcmd_len;

	NAN_DBG_ENTER();

	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
	soc_chans =
		(wl_nan_social_channels_t *)sub_cmd->data;

	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
		sizeof(*soc_chans), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		return ret;
	}

	sub_cmd->id = htod16(WL_NAN_CMD_SYNC_SOCIAL_CHAN);
	sub_cmd->len = sizeof(sub_cmd->u.options) +
		sizeof(*soc_chans);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	if (nan_attr_mask & NAN_ATTR_2G_CHAN_CONFIG) {
		soc_chans->soc_chan_2g = cmd_data->chanspec[1];
	} else {
		soc_chans->soc_chan_2g = NAN_DEF_SOCIAL_CHAN_2G;
	}

	if (cmd_data->support_5g) {
		if (nan_attr_mask & NAN_ATTR_5G_CHAN_CONFIG) {
			soc_chans->soc_chan_5g = cmd_data->chanspec[2];
		} else {
			soc_chans->soc_chan_5g = NAN_DEF_SOCIAL_CHAN_5G;
		}
		/* Fall back: primary default -> secondary default -> 2G-only */
		ret = check_for_valid_5gchan(ndev, soc_chans->soc_chan_5g);
		if (ret != BCME_OK) {
			ret = check_for_valid_5gchan(ndev, NAN_DEF_SEC_SOCIAL_CHAN_5G);
			if (ret == BCME_OK) {
				soc_chans->soc_chan_5g = NAN_DEF_SEC_SOCIAL_CHAN_5G;
			} else {
				/* deliberate: swallow the error and run 2G-only */
				soc_chans->soc_chan_5g = 0;
				ret = BCME_OK;
				WL_ERR(("Current locale doesn't support 5G op"
					"continuing with 2G only operation\n"));
			}
		}
	} else {
		WL_DBG(("5G support is disabled\n"));
	}
	nan_iov_data->nan_iov_len -= subcmd_len;
	nan_iov_data->nan_iov_buf += subcmd_len;

	NAN_DBG_EXIT();
	return ret;
}
1691
/*
 * Push per-band NAN discovery scan parameters (dwell time / scan period)
 * to firmware via sub-command WL_NAN_CMD_CFG_SCAN_PARAMS.
 *
 * @param band_index 0 selects 2G, non-zero selects 5G.
 * Only fields whose bit is present in nan_attr_mask are overridden; the
 * rest stay at the MALLOCZ-zeroed value, which firmware treats as default.
 * @return BCME_OK on success, BCME_NOMEM or an ioctl error otherwise.
 */
static int
wl_cfgnan_set_nan_scan_params(struct net_device *ndev, struct bcm_cfg80211 *cfg,
	nan_config_cmd_data_t *cmd_data, uint8 band_index, uint32 nan_attr_mask)
{
	bcm_iov_batch_buf_t *nan_buf = NULL;
	s32 ret = BCME_OK;
	uint16 nan_iov_start, nan_iov_end;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	uint16 subcmd_len;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	wl_nan_iov_t *nan_iov_data = NULL;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
	wl_nan_scan_params_t *scan_params = NULL;
	uint32 status;

	dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);

	NAN_DBG_ENTER();

	nan_buf = MALLOCZ(dhdp->osh, nan_buf_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	nan_iov_data = MALLOCZ(dhdp->osh, sizeof(*nan_iov_data));
	if (!nan_iov_data) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	/* Lay out a single-subcommand batch buffer */
	nan_iov_data->nan_iov_len = nan_iov_start = NAN_IOCTL_BUF_SIZE;
	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
	nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);

	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
		sizeof(*scan_params), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		goto fail;
	}
	scan_params = (wl_nan_scan_params_t *)sub_cmd->data;

	sub_cmd->id = htod16(WL_NAN_CMD_CFG_SCAN_PARAMS);
	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*scan_params);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);

	if (!band_index) {
		/* Fw default: Dwell time for 2G is 210 */
		if ((nan_attr_mask & NAN_ATTR_2G_DWELL_TIME_CONFIG) &&
			cmd_data->dwell_time[0]) {
			scan_params->dwell_time = cmd_data->dwell_time[0] +
				NAN_SCAN_DWELL_TIME_DELTA_MS;
		}
		/* Fw default: Scan period for 2G is 10 */
		if (nan_attr_mask & NAN_ATTR_2G_SCAN_PERIOD_CONFIG) {
			scan_params->scan_period = cmd_data->scan_period[0];
		}
	} else {
		if ((nan_attr_mask & NAN_ATTR_5G_DWELL_TIME_CONFIG) &&
			cmd_data->dwell_time[1]) {
			scan_params->dwell_time = cmd_data->dwell_time[1] +
				NAN_SCAN_DWELL_TIME_DELTA_MS;
		}
		if (nan_attr_mask & NAN_ATTR_5G_SCAN_PERIOD_CONFIG) {
			scan_params->scan_period = cmd_data->scan_period[1];
		}
	}
	scan_params->band_index = band_index;
	nan_buf->is_set = true;
	nan_buf->count++;

	/* Reduce the iov_len size by subcmd_len */
	nan_iov_data->nan_iov_len -= subcmd_len;
	nan_iov_end = nan_iov_data->nan_iov_len;
	nan_buf_size = (nan_iov_start - nan_iov_end);

	memset(resp_buf, 0, sizeof(resp_buf));
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
		(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(status)) {
		WL_ERR(("set nan scan params failed ret %d status %d \n", ret, status));
		goto fail;
	}
	WL_DBG(("set nan scan params successfull\n"));
fail:
	if (nan_buf) {
		MFREE(dhdp->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}
	if (nan_iov_data) {
		MFREE(dhdp->osh, nan_iov_data, sizeof(*nan_iov_data));
	}

	NAN_DBG_EXIT();
	return ret;
}
1793
/*
 * Append a WL_NAN_CMD_CFG_CID sub-command programming the NAN cluster ID.
 * Only the first 4 octets are forced here (the 50:6F:9A:01 NAN prefix);
 * the remaining octets of cmd_data->clus_id keep whatever value the
 * caller supplied.
 */
static int
wl_cfgnan_set_cluster_id(nan_config_cmd_data_t *cmd_data,
	wl_nan_iov_t *nan_iov_data)
{
	s32 ret = BCME_OK;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	uint16 subcmd_len;

	NAN_DBG_ENTER();

	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);

	/* NOTE(review): the length check reserves sizeof(clus_id) - 1 bytes
	 * while the memcpy below writes the full sizeof(clus_id) — presumably
	 * absorbed by 32-bit subcmd alignment padding, but worth confirming.
	 */
	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
		(sizeof(cmd_data->clus_id) - sizeof(uint8)), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		return ret;
	}

	cmd_data->clus_id.octet[0] = 0x50;
	cmd_data->clus_id.octet[1] = 0x6F;
	cmd_data->clus_id.octet[2] = 0x9A;
	cmd_data->clus_id.octet[3] = 0x01;
	WL_TRACE(("cluster_id = " MACDBG "\n", MAC2STRDBG(cmd_data->clus_id.octet)));

	sub_cmd->id = htod16(WL_NAN_CMD_CFG_CID);
	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(cmd_data->clus_id);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	memcpy(sub_cmd->data, (uint8 *)&cmd_data->clus_id,
		sizeof(cmd_data->clus_id));

	nan_iov_data->nan_iov_len -= subcmd_len;
	nan_iov_data->nan_iov_buf += subcmd_len;

	NAN_DBG_EXIT();
	return ret;
}
1831
1832 static int
1833 wl_cfgnan_set_hop_count_limit(nan_config_cmd_data_t *cmd_data,
1834 wl_nan_iov_t *nan_iov_data)
1835 {
1836 s32 ret = BCME_OK;
1837 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
1838 wl_nan_hop_count_t *hop_limit = NULL;
1839 uint16 subcmd_len;
1840
1841 NAN_DBG_ENTER();
1842
1843 sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
1844 hop_limit = (wl_nan_hop_count_t *)sub_cmd->data;
1845
1846 ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
1847 sizeof(*hop_limit), &subcmd_len);
1848 if (unlikely(ret)) {
1849 WL_ERR(("nan_sub_cmd check failed\n"));
1850 return ret;
1851 }
1852
1853 *hop_limit = cmd_data->hop_count_limit;
1854 sub_cmd->id = htod16(WL_NAN_CMD_CFG_HOP_LIMIT);
1855 sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*hop_limit);
1856 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
1857
1858 nan_iov_data->nan_iov_len -= subcmd_len;
1859 nan_iov_data->nan_iov_buf += subcmd_len;
1860
1861 NAN_DBG_EXIT();
1862 return ret;
1863 }
1864
1865 static int
1866 wl_cfgnan_set_sid_beacon_val(nan_config_cmd_data_t *cmd_data,
1867 wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
1868 {
1869 s32 ret = BCME_OK;
1870 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
1871 wl_nan_sid_beacon_control_t *sid_beacon = NULL;
1872 uint16 subcmd_len;
1873
1874 NAN_DBG_ENTER();
1875
1876 sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
1877
1878 ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
1879 sizeof(*sid_beacon), &subcmd_len);
1880 if (unlikely(ret)) {
1881 WL_ERR(("nan_sub_cmd check failed\n"));
1882 return ret;
1883 }
1884
1885 sid_beacon = (wl_nan_sid_beacon_control_t *)sub_cmd->data;
1886 sid_beacon->sid_enable = cmd_data->sid_beacon.sid_enable;
1887 /* Need to have separate flag for sub beacons
1888 * sid_beacon->sub_sid_enable = cmd_data->sid_beacon.sub_sid_enable;
1889 */
1890 if (nan_attr_mask & NAN_ATTR_SID_BEACON_CONFIG) {
1891 /* Limit for number of publish SIDs to be included in Beacons */
1892 sid_beacon->sid_count = cmd_data->sid_beacon.sid_count;
1893 }
1894 if (nan_attr_mask & NAN_ATTR_SUB_SID_BEACON_CONFIG) {
1895 /* Limit for number of subscribe SIDs to be included in Beacons */
1896 sid_beacon->sub_sid_count = cmd_data->sid_beacon.sub_sid_count;
1897 }
1898 sub_cmd->id = htod16(WL_NAN_CMD_CFG_SID_BEACON);
1899 sub_cmd->len = sizeof(sub_cmd->u.options) +
1900 sizeof(*sid_beacon);
1901 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
1902
1903 nan_iov_data->nan_iov_len -= subcmd_len;
1904 nan_iov_data->nan_iov_buf += subcmd_len;
1905 NAN_DBG_EXIT();
1906 return ret;
1907 }
1908
1909 static int
1910 wl_cfgnan_set_nan_oui(nan_config_cmd_data_t *cmd_data,
1911 wl_nan_iov_t *nan_iov_data)
1912 {
1913 s32 ret = BCME_OK;
1914 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
1915 uint16 subcmd_len;
1916
1917 NAN_DBG_ENTER();
1918
1919 sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
1920
1921 ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
1922 sizeof(cmd_data->nan_oui), &subcmd_len);
1923 if (unlikely(ret)) {
1924 WL_ERR(("nan_sub_cmd check failed\n"));
1925 return ret;
1926 }
1927
1928 sub_cmd->id = htod16(WL_NAN_CMD_CFG_OUI);
1929 sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(cmd_data->nan_oui);
1930 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
1931 memcpy(sub_cmd->data, (uint32 *)&cmd_data->nan_oui,
1932 sizeof(cmd_data->nan_oui));
1933
1934 nan_iov_data->nan_iov_len -= subcmd_len;
1935 nan_iov_data->nan_iov_buf += subcmd_len;
1936 NAN_DBG_EXIT();
1937 return ret;
1938 }
1939
/*
 * Append a WL_NAN_CMD_SYNC_AWAKE_DWS sub-command configuring the awake
 * discovery-window interval per band.
 *
 * 2G: caller value when NAN_ATTR_2G_DW_CONFIG is set (0 replaced by the
 * firmware default of 1); otherwise the default.
 * 5G (only when cfg->support_5g): a caller value of 0 means "no 5G DW" —
 * the 5G discovery and sync beacon TX control flags are cleared via
 * wl_cfgnan_config_control_flag() instead of programming an interval.
 *
 * On a control-flag failure, cmd_data->status carries the firmware status
 * and the function bails out before writing the sub-command header.
 */
static int
wl_cfgnan_set_awake_dws(struct net_device *ndev, nan_config_cmd_data_t *cmd_data,
	wl_nan_iov_t *nan_iov_data, struct bcm_cfg80211 *cfg, uint32 nan_attr_mask)
{
	s32 ret = BCME_OK;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	wl_nan_awake_dws_t *awake_dws = NULL;
	uint16 subcmd_len;
	NAN_DBG_ENTER();

	sub_cmd =
		(bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
		sizeof(*awake_dws), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		return ret;
	}

	awake_dws = (wl_nan_awake_dws_t *)sub_cmd->data;

	if (nan_attr_mask & NAN_ATTR_2G_DW_CONFIG) {
		awake_dws->dw_interval_2g = cmd_data->awake_dws.dw_interval_2g;
		if (!awake_dws->dw_interval_2g) {
			/* Set 2G awake dw value to fw default value 1 */
			awake_dws->dw_interval_2g = NAN_SYNC_DEF_AWAKE_DW;
		}
	} else {
		/* Set 2G awake dw value to fw default value 1 */
		awake_dws->dw_interval_2g = NAN_SYNC_DEF_AWAKE_DW;
	}

	if (cfg->support_5g) {
		if (nan_attr_mask & NAN_ATTR_5G_DW_CONFIG) {
			awake_dws->dw_interval_5g = cmd_data->awake_dws.dw_interval_5g;
			if (!awake_dws->dw_interval_5g) {
				/* disable 5g beacon ctrls */
				ret = wl_cfgnan_config_control_flag(ndev, cfg,
					WL_NAN_CTRL_DISC_BEACON_TX_5G,
					&(cmd_data->status), 0);
				if (unlikely(ret) || unlikely(cmd_data->status)) {
					WL_ERR((" nan control set config handler,"
						" ret = %d status = %d \n",
						ret, cmd_data->status));
					goto fail;
				}
				ret = wl_cfgnan_config_control_flag(ndev, cfg,
					WL_NAN_CTRL_SYNC_BEACON_TX_5G,
					&(cmd_data->status), 0);
				if (unlikely(ret) || unlikely(cmd_data->status)) {
					WL_ERR((" nan control set config handler,"
						" ret = %d status = %d \n",
						ret, cmd_data->status));
					goto fail;
				}
			}
		} else {
			/* Set 5G awake dw value to fw default value 1 */
			awake_dws->dw_interval_5g = NAN_SYNC_DEF_AWAKE_DW;
		}
	}

	sub_cmd->id = htod16(WL_NAN_CMD_SYNC_AWAKE_DWS);
	sub_cmd->len = sizeof(sub_cmd->u.options) +
		sizeof(*awake_dws);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);

	/* advance cursor only on success; 'fail' skips this on error */
	nan_iov_data->nan_iov_len -= subcmd_len;
	nan_iov_data->nan_iov_buf += subcmd_len;

fail:
	NAN_DBG_EXIT();
	return ret;
}
2014
2015 int
2016 wl_cfgnan_start_handler(struct net_device *ndev, struct bcm_cfg80211 *cfg,
2017 nan_config_cmd_data_t *cmd_data, uint32 nan_attr_mask)
2018 {
2019 s32 ret = BCME_OK;
2020 uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
2021 bcm_iov_batch_buf_t *nan_buf = NULL;
2022 wl_nan_iov_t *nan_iov_data = NULL;
2023 dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
2024 uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
2025 int i;
2026 s32 timeout = 0;
2027 bool mutex_locked = false;
2028
2029 NAN_DBG_ENTER();
2030 NAN_MUTEX_LOCK();
2031 mutex_locked = true;
2032
2033 if (!wl_cfg80211_check_for_nan_support(cfg)) {
2034 ret = BCME_UNSUPPORTED;
2035 goto fail;
2036 }
2037
2038 /* disable P2P */
2039 ret = wl_cfg80211_deinit_p2p_discovery(cfg);
2040 if (ret != BCME_OK) {
2041 WL_ERR(("Failed to disable p2p_disc during nan_enab"));
2042 }
2043 WL_ERR(("Initializing NAN\n"));
2044 ret = wl_cfgnan_init(cfg);
2045 if (ret != BCME_OK) {
2046 WL_ERR(("failed to initialize NAN[%d]\n", ret));
2047 goto fail;
2048 }
2049
2050 /* set nmi addr */
2051 ret = wl_cfgnan_set_if_addr(cfg);
2052 if (ret != BCME_OK) {
2053 WL_ERR(("Failed to set nmi address \n"));
2054 goto fail;
2055 }
2056
2057 for (i = 0; i < NAN_MAX_NDI; i++) {
2058 /* Create NDI using the information provided by user space */
2059 if (cfg->nancfg.ndi[i].in_use && !cfg->nancfg.ndi[i].created) {
2060 ret = wl_cfgnan_data_path_iface_create_delete_handler(ndev, cfg,
2061 cfg->nancfg.ndi[i].ifname,
2062 NAN_WIFI_SUBCMD_DATA_PATH_IFACE_CREATE, dhdp->up);
2063 if (ret) {
2064 WL_ERR(("failed to create ndp interface [%d]\n", ret));
2065 goto fail;
2066 }
2067 cfg->nancfg.ndi[i].created = true;
2068 }
2069 }
2070
2071 nan_buf = MALLOCZ(dhdp->osh, nan_buf_size);
2072 if (!nan_buf) {
2073 WL_ERR(("%s: memory allocation failed\n", __func__));
2074 ret = BCME_NOMEM;
2075 goto fail;
2076 }
2077
2078 nan_iov_data = MALLOCZ(dhdp->osh, sizeof(*nan_iov_data));
2079 if (!nan_iov_data) {
2080 WL_ERR(("%s: memory allocation failed\n", __func__));
2081 ret = BCME_NOMEM;
2082 goto fail;
2083 }
2084
2085 nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
2086 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
2087 nan_buf->count = 0;
2088 nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
2089 nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
2090
2091 if (nan_attr_mask & NAN_ATTR_SYNC_DISC_2G_BEACON_CONFIG) {
2092 /* config sync/discovery beacons on 2G band */
2093 /* 2g is mandatory */
2094 if (!cmd_data->beacon_2g_val) {
2095 WL_ERR(("Invalid NAN config...2G is mandatory\n"));
2096 ret = BCME_BADARG;
2097 }
2098 ret = wl_cfgnan_config_control_flag(ndev, cfg,
2099 WL_NAN_CTRL_DISC_BEACON_TX_2G | WL_NAN_CTRL_SYNC_BEACON_TX_2G,
2100 &(cmd_data->status), TRUE);
2101 if (unlikely(ret) || unlikely(cmd_data->status)) {
2102 WL_ERR((" nan control set config handler, ret = %d status = %d \n",
2103 ret, cmd_data->status));
2104 goto fail;
2105 }
2106 }
2107 if (nan_attr_mask & NAN_ATTR_SYNC_DISC_5G_BEACON_CONFIG) {
2108 /* config sync/discovery beacons on 5G band */
2109 ret = wl_cfgnan_config_control_flag(ndev, cfg,
2110 WL_NAN_CTRL_DISC_BEACON_TX_5G | WL_NAN_CTRL_SYNC_BEACON_TX_5G,
2111 &(cmd_data->status), cmd_data->beacon_5g_val);
2112 if (unlikely(ret) || unlikely(cmd_data->status)) {
2113 WL_ERR((" nan control set config handler, ret = %d status = %d \n",
2114 ret, cmd_data->status));
2115 goto fail;
2116 }
2117 }
2118 /* Setting warm up time */
2119 cmd_data->warmup_time = 1;
2120 if (cmd_data->warmup_time) {
2121 ret = wl_cfgnan_warmup_time_handler(cmd_data, nan_iov_data);
2122 if (unlikely(ret)) {
2123 WL_ERR(("warm up time handler sub_cmd set failed\n"));
2124 goto fail;
2125 }
2126 nan_buf->count++;
2127 }
2128 /* setting master preference and random factor */
2129 ret = wl_cfgnan_set_election_metric(cmd_data, nan_iov_data, nan_attr_mask);
2130 if (unlikely(ret)) {
2131 WL_ERR(("election_metric sub_cmd set failed\n"));
2132 goto fail;
2133 } else {
2134 nan_buf->count++;
2135 }
2136
2137 /* setting nan social channels */
2138 ret = wl_cfgnan_set_nan_soc_chans(ndev, cmd_data, nan_iov_data, nan_attr_mask);
2139 if (unlikely(ret)) {
2140 WL_ERR(("nan social channels set failed\n"));
2141 goto fail;
2142 } else {
2143 /* Storing 5g capability which is reqd for avail chan config. */
2144 cfg->support_5g = cmd_data->support_5g;
2145 nan_buf->count++;
2146 }
2147
2148 if ((cmd_data->support_2g) && ((cmd_data->dwell_time[0]) ||
2149 (cmd_data->scan_period[0]))) {
2150 /* setting scan params */
2151 ret = wl_cfgnan_set_nan_scan_params(ndev, cfg, cmd_data, 0, nan_attr_mask);
2152 if (unlikely(ret)) {
2153 WL_ERR(("scan params set failed for 2g\n"));
2154 goto fail;
2155 }
2156 }
2157
2158 if ((cmd_data->support_5g) && ((cmd_data->dwell_time[1]) ||
2159 (cmd_data->scan_period[1]))) {
2160 /* setting scan params */
2161 ret = wl_cfgnan_set_nan_scan_params(ndev, cfg, cmd_data,
2162 cmd_data->support_5g, nan_attr_mask);
2163 if (unlikely(ret)) {
2164 WL_ERR(("scan params set failed for 5g\n"));
2165 goto fail;
2166 }
2167 }
2168
2169 /*
2170 * A cluster_low value matching cluster_high indicates a request
2171 * to join a cluster with that value.
2172 * If the requested cluster is not found the
2173 * device will start its own cluster
2174 */
2175 /* For Debug purpose, using clust id compulsion */
2176 if (!ETHER_ISNULLADDR(&cmd_data->clus_id.octet)) {
2177 if (cmd_data->clus_id.octet[4] == cmd_data->clus_id.octet[5]) {
2178 /* device will merge to configured CID only */
2179 ret = wl_cfgnan_config_control_flag(ndev, cfg,
2180 WL_NAN_CTRL_MERGE_CONF_CID_ONLY, &(cmd_data->status), true);
2181 if (unlikely(ret) || unlikely(cmd_data->status)) {
2182 WL_ERR((" nan control set config handler, ret = %d status = %d \n",
2183 ret, cmd_data->status));
2184 goto fail;
2185 }
2186 }
2187 /* setting cluster ID */
2188 ret = wl_cfgnan_set_cluster_id(cmd_data, nan_iov_data);
2189 if (unlikely(ret)) {
2190 WL_ERR(("cluster_id sub_cmd set failed\n"));
2191 goto fail;
2192 }
2193 nan_buf->count++;
2194 }
2195
2196 /* setting rssi proximaty values for 2.4GHz and 5GHz */
2197 ret = wl_cfgnan_set_rssi_proximity(cmd_data, nan_iov_data, nan_attr_mask);
2198 if (unlikely(ret)) {
2199 WL_ERR(("2.4GHz/5GHz rssi proximity threshold set failed\n"));
2200 goto fail;
2201 } else {
2202 nan_buf->count++;
2203 }
2204
2205 /* setting rssi middle/close values for 2.4GHz and 5GHz */
2206 ret = wl_cfgnan_set_rssi_mid_or_close(cmd_data, nan_iov_data, nan_attr_mask);
2207 if (unlikely(ret)) {
2208 WL_ERR(("2.4GHz/5GHz rssi middle and close set failed\n"));
2209 goto fail;
2210 } else {
2211 nan_buf->count++;
2212 }
2213
2214 /* setting hop count limit or threshold */
2215 if (nan_attr_mask & NAN_ATTR_HOP_COUNT_LIMIT_CONFIG) {
2216 ret = wl_cfgnan_set_hop_count_limit(cmd_data, nan_iov_data);
2217 if (unlikely(ret)) {
2218 WL_ERR(("hop_count_limit sub_cmd set failed\n"));
2219 goto fail;
2220 }
2221 nan_buf->count++;
2222 }
2223
2224 /* setting sid beacon val */
2225 if ((nan_attr_mask & NAN_ATTR_SID_BEACON_CONFIG) ||
2226 (nan_attr_mask & NAN_ATTR_SUB_SID_BEACON_CONFIG)) {
2227 ret = wl_cfgnan_set_sid_beacon_val(cmd_data, nan_iov_data, nan_attr_mask);
2228 if (unlikely(ret)) {
2229 WL_ERR(("sid_beacon sub_cmd set failed\n"));
2230 goto fail;
2231 }
2232 nan_buf->count++;
2233 }
2234
2235 /* setting nan oui */
2236 if (nan_attr_mask & NAN_ATTR_OUI_CONFIG) {
2237 ret = wl_cfgnan_set_nan_oui(cmd_data, nan_iov_data);
2238 if (unlikely(ret)) {
2239 WL_ERR(("nan_oui sub_cmd set failed\n"));
2240 goto fail;
2241 }
2242 nan_buf->count++;
2243 }
2244
2245 /* setting nan awake dws */
2246 ret = wl_cfgnan_set_awake_dws(ndev, cmd_data,
2247 nan_iov_data, cfg, nan_attr_mask);
2248 if (unlikely(ret)) {
2249 WL_ERR(("nan awake dws set failed\n"));
2250 goto fail;
2251 } else {
2252 nan_buf->count++;
2253 }
2254
2255 /* enable events */
2256 ret = wl_cfgnan_config_eventmask(ndev, cfg, cmd_data->disc_ind_cfg, false);
2257 if (unlikely(ret)) {
2258 goto fail;
2259 }
2260
2261 /* setting nan enable sub_cmd */
2262 ret = wl_cfgnan_enable_handler(nan_iov_data, true);
2263 if (unlikely(ret)) {
2264 WL_ERR(("enable handler sub_cmd set failed\n"));
2265 goto fail;
2266 }
2267 nan_buf->count++;
2268 nan_buf->is_set = true;
2269
2270 nan_buf_size -= nan_iov_data->nan_iov_len;
2271 memset(resp_buf, 0, sizeof(resp_buf));
2272 mutex_locked = false;
2273 /* Reset conditon variable */
2274 cfg->nancfg.nan_event_recvd = false;
2275 /* Releasing lock to allow event processing */
2276 NAN_MUTEX_UNLOCK();
2277 ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
2278 &(cmd_data->status), (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
2279 if (unlikely(ret) || unlikely(cmd_data->status)) {
2280 WL_ERR((" nan start handler, enable failed, ret = %d status = %d \n",
2281 ret, cmd_data->status));
2282 goto fail;
2283 }
2284 timeout = wait_event_timeout(cfg->nancfg.nan_event_wait,
2285 cfg->nancfg.nan_event_recvd, msecs_to_jiffies(NAN_START_STOP_TIMEOUT));
2286 if (!timeout) {
2287 WL_ERR(("Timed out while Waiting for WL_NAN_EVENT_START event !!!\n"));
2288 ret = BCME_ERROR;
2289 goto fail;
2290 }
2291
2292 /* If set, auto datapath confirms will be sent by FW */
2293 ret = wl_cfgnan_config_control_flag(ndev, cfg, WL_NAN_CTRL_AUTO_DPCONF,
2294 &(cmd_data->status), true);
2295 if (unlikely(ret) || unlikely(cmd_data->status)) {
2296 WL_ERR((" nan control set config handler, ret = %d status = %d \n",
2297 ret, cmd_data->status));
2298 goto fail;
2299 }
2300 WL_INFORM_MEM(("[NAN] Enable successfull \n"));
2301 fail:
2302 /* reset conditon variable */
2303 cfg->nancfg.nan_event_recvd = false;
2304 if (unlikely(ret) || unlikely(cmd_data->status)) {
2305 for (i = 0; i < NAN_MAX_NDI; i++) {
2306 if (cfg->nancfg.ndi[i].in_use && cfg->nancfg.ndi[i].created) {
2307 WL_INFORM_MEM(("Deleting NAN NDI IDX:%d\n", i));
2308 ret = wl_cfgnan_data_path_iface_create_delete_handler(ndev, cfg,
2309 (char*)cfg->nancfg.ndi[i].ifname,
2310 NAN_WIFI_SUBCMD_DATA_PATH_IFACE_DELETE, dhdp->up);
2311 if (ret) {
2312 WL_ERR(("failed to delete ndp iface [%d]\n", ret));
2313 }
2314 }
2315 }
2316 }
2317 if (nan_buf) {
2318 MFREE(dhdp->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
2319 }
2320 if (nan_iov_data) {
2321 MFREE(dhdp->osh, nan_iov_data, sizeof(*nan_iov_data));
2322 }
2323
2324 if (mutex_locked)
2325 NAN_MUTEX_UNLOCK();
2326
2327 NAN_DBG_EXIT();
2328 return ret;
2329 }
2330
2331 int
2332 wl_cfgnan_disable(struct bcm_cfg80211 *cfg, nan_stop_reason_code_t reason)
2333 {
2334 s32 ret = BCME_OK;
2335 dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
2336 int i = 0;
2337
2338 NAN_DBG_ENTER();
2339 if (cfg->nan_enable) {
2340 struct net_device *ndev;
2341 ndev = bcmcfg_to_prmry_ndev(cfg);
2342 cfg->nancfg.disable_reason = reason;
2343 ret = wl_cfgnan_stop_handler(ndev, cfg, false);
2344 if (ret != BCME_OK) {
2345 WL_ERR(("failed to stop nan, error[%d]\n", ret));
2346 }
2347 /* We have to remove NDIs so that P2P/Softap can work */
2348 for (i = 0; i < NAN_MAX_NDI; i++) {
2349 if (cfg->nancfg.ndi[i].in_use && cfg->nancfg.ndi[i].created) {
2350 WL_INFORM_MEM(("Deleting NAN NDI IDX:%d\n", i));
2351 ret = wl_cfgnan_data_path_iface_create_delete_handler(ndev, cfg,
2352 (char*)cfg->nancfg.ndi[i].ifname,
2353 NAN_WIFI_SUBCMD_DATA_PATH_IFACE_DELETE, dhdp->up);
2354 if (ret) {
2355 WL_ERR(("failed to delete ndp iface [%d]\n", ret));
2356 }
2357 cfg->nancfg.ndi[i].created = false;
2358 }
2359 }
2360 ret = wl_cfgnan_deinit(cfg, dhdp->up);
2361 if (ret != BCME_OK) {
2362 WL_ERR(("failed to de-initialize NAN[%d]\n", ret));
2363 }
2364 }
2365 NAN_DBG_EXIT();
2366 return ret;
2367 }
2368
/*
 * Synthesize a GOOGLE_NAN_EVENT_DISABLED up to the framework without
 * talking to firmware (used when the bus is already down), then reset
 * the host-side NAN service/instance bookkeeping.
 * NOTE(review): nan_event_data actually points into the caller's
 * NAN_IOCTL_BUF_SIZE scratch buffer, which is why the memset below clears
 * NAN_IOCTL_BUF_SIZE bytes rather than sizeof(*nan_event_data) -- confirm
 * all callers keep passing a buffer of that size.
 */
static s32
wl_cfgnan_send_stop_event(nan_event_data_t *nan_event_data, struct bcm_cfg80211 *cfg)
{
	s32 ret = BCME_OK;
	NAN_DBG_ENTER();
	memset(nan_event_data, 0, NAN_IOCTL_BUF_SIZE);
	nan_event_data->status = NAN_STATUS_SUCCESS;
	/* Buffer was just zeroed, so the un-copied tail NUL-terminates the reason */
	memcpy(nan_event_data->nan_reason, "NAN_STATUS_SUCCESS",
		strlen("NAN_STATUS_SUCCESS"));
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
	ret = wl_cfgvendor_send_nan_event(cfg->wdev->wiphy, bcmcfg_to_prmry_ndev(cfg),
		GOOGLE_NAN_EVENT_DISABLED, nan_event_data);
	if (ret != BCME_OK) {
		WL_ERR(("Failed to send event to nan hal, (%d)\n",
			GOOGLE_NAN_EVENT_DISABLED));
	}
#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
	WL_INFORM(("Sending disabled event if Bus is down\n"));
	/* Resetting instance ID mask */
	cfg->nancfg.inst_id_start = 0;
	memset(cfg->nancfg.svc_inst_id_mask, 0, sizeof(cfg->nancfg.svc_inst_id_mask));
	memset(cfg->svc_info, 0, NAN_MAX_SVC_INST * sizeof(nan_svc_info_t));
	cfg->nan_enable = false;
	NAN_DBG_EXIT();
	return ret;
}
2395
/*
 * Stop NAN in firmware. Sends the batched "nan disable" iovar and waits for
 * WL_NAN_EVENT_STOP (lock is dropped before the ioctl so the event can be
 * processed). When the bus is already down (NAN_BUS_IS_DOWN) the firmware
 * path is skipped and only a synthesized disabled-event is sent up.
 * disable_events: when true, NAN event delivery is masked off first so the
 * framework's own iface-cleanup does not race with our event-driven cleanup.
 */
int
wl_cfgnan_stop_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, bool disable_events)
{
	bcm_iov_batch_buf_t *nan_buf = NULL;
	s32 ret = BCME_ERROR;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	wl_nan_iov_t *nan_iov_data = NULL;
	uint32 status;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
	dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
	/* Scratch buffer reused as the event payload for the bus-down path */
	uint8 buf[NAN_IOCTL_BUF_SIZE];
	nan_event_data_t *nan_event_data = (nan_event_data_t*)buf;
	s32 timeout;
	bool mutex_locked = false;

	NAN_DBG_ENTER();
	NAN_MUTEX_LOCK();
	mutex_locked = true;

	if (!cfg->nan_enable) {
		WL_INFORM(("Nan is not enabled\n"));
		ret = BCME_OK;
		goto fail;
	}

	if (cfg->nancfg.disable_reason != NAN_BUS_IS_DOWN) {
		/*
		 * Framework doing cleanup(iface remove) on disable command,
		 * so avoiding event to prevent iface delete calls again
		 */
		if (disable_events) {
			WL_INFORM_MEM(("[NAN] Disabling Nan events\n"));
			wl_cfgnan_config_eventmask(ndev, cfg, 0, true);
		}
		nan_buf = MALLOCZ(dhdp->osh, nan_buf_size);
		if (!nan_buf) {
			WL_ERR(("%s: memory allocation failed\n", __func__));
			ret = BCME_NOMEM;
			goto fail;
		}

		nan_iov_data = MALLOCZ(dhdp->osh, sizeof(*nan_iov_data));
		if (!nan_iov_data) {
			WL_ERR(("%s: memory allocation failed\n", __func__));
			ret = BCME_NOMEM;
			goto fail;
		}

		/* Build a one-command batch containing the disable sub-command */
		nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
		nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
		nan_buf->count = 0;
		nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
		nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

		ret = wl_cfgnan_enable_handler(nan_iov_data, false);
		if (unlikely(ret)) {
			WL_ERR(("nan disable handler failed\n"));
			goto fail;
		}
		nan_buf->count++;
		nan_buf->is_set = true;
		nan_buf_size -= nan_iov_data->nan_iov_len;
		memset(resp_buf, 0, sizeof(resp_buf));
		mutex_locked = false;
		/* reset conditon variable */
		cfg->nancfg.nan_event_recvd = false;
		/* Releasing lock to allow event processing */
		NAN_MUTEX_UNLOCK();
		ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
			(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
		if (unlikely(ret) || unlikely(status)) {
			WL_ERR(("nan disable failed ret = %d status = %d\n", ret, status));
			goto fail;
		}
		cfg->nan_enable = false;
		/* Wait for fw to confirm the stop via WL_NAN_EVENT_STOP */
		timeout = wait_event_timeout(cfg->nancfg.nan_event_wait,
			cfg->nancfg.nan_event_recvd,
			msecs_to_jiffies(NAN_START_STOP_TIMEOUT));
		if (!timeout) {
			WL_ERR(("Timed out while Waiting for"
				" WL_NAN_EVENT_STOP event !!!\n"));
			ret = BCME_ERROR;
			goto fail;
		}
		WL_INFORM_MEM(("[NAN] Disable done\n"));
	} else {
		/* Sending up NAN disabled event, to clear the nan state in framework */
		ret = wl_cfgnan_send_stop_event(nan_event_data, cfg);
	}
fail:
	/* reset conditon variable */
	cfg->nancfg.nan_event_recvd = false;
	if (nan_buf) {
		MFREE(dhdp->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}
	if (nan_iov_data) {
		MFREE(dhdp->osh, nan_iov_data, sizeof(*nan_iov_data));
	}

	if (mutex_locked)
		NAN_MUTEX_UNLOCK();
	NAN_DBG_EXIT();
	return ret;
}
2501
/*
 * Runtime re-configuration of an already-enabled NAN session. Batches any
 * requested sub-commands (sid beacon, election metrics, hop count, RSSI
 * proximity, awake DWs) into one iovar, then separately applies event-mask,
 * scan-param and availability changes. No-op when NAN is disabled.
 * Returns BCME_OK or a BCME_* error; cmd_data->status carries fw status.
 */
int
wl_cfgnan_config_handler(struct net_device *ndev, struct bcm_cfg80211 *cfg,
	nan_config_cmd_data_t *cmd_data, uint32 nan_attr_mask)
{
	bcm_iov_batch_buf_t *nan_buf = NULL;
	s32 ret = BCME_OK;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	wl_nan_iov_t *nan_iov_data = NULL;
	dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];

	NAN_DBG_ENTER();

	/* Nan need to be enabled before configuring/updating params */
	if (cfg->nan_enable) {
		nan_buf = MALLOCZ(dhdp->osh, nan_buf_size);
		if (!nan_buf) {
			WL_ERR(("%s: memory allocation failed\n", __func__));
			ret = BCME_NOMEM;
			goto fail;
		}

		nan_iov_data = MALLOCZ(dhdp->osh, sizeof(*nan_iov_data));
		if (!nan_iov_data) {
			WL_ERR(("%s: memory allocation failed\n", __func__));
			ret = BCME_NOMEM;
			goto fail;
		}

		/* Batch buffer header; sub-commands appended via nan_iov_buf */
		nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
		nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
		nan_buf->count = 0;
		nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
		nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

		/* setting sid beacon val */
		if ((nan_attr_mask & NAN_ATTR_SID_BEACON_CONFIG) ||
			(nan_attr_mask & NAN_ATTR_SUB_SID_BEACON_CONFIG)) {
			ret = wl_cfgnan_set_sid_beacon_val(cmd_data, nan_iov_data, nan_attr_mask);
			if (unlikely(ret)) {
				WL_ERR(("sid_beacon sub_cmd set failed\n"));
				goto fail;
			}
			nan_buf->count++;
		}

		/* setting master preference and random factor */
		if (cmd_data->metrics.random_factor ||
			cmd_data->metrics.master_pref) {
			ret = wl_cfgnan_set_election_metric(cmd_data, nan_iov_data,
				nan_attr_mask);
			if (unlikely(ret)) {
				WL_ERR(("election_metric sub_cmd set failed\n"));
				goto fail;
			} else {
				nan_buf->count++;
			}
		}

		/* setting hop count limit or threshold */
		if (nan_attr_mask & NAN_ATTR_HOP_COUNT_LIMIT_CONFIG) {
			ret = wl_cfgnan_set_hop_count_limit(cmd_data, nan_iov_data);
			if (unlikely(ret)) {
				WL_ERR(("hop_count_limit sub_cmd set failed\n"));
				goto fail;
			}
			nan_buf->count++;
		}

		/* setting rssi proximaty values for 2.4GHz and 5GHz */
		ret = wl_cfgnan_set_rssi_proximity(cmd_data, nan_iov_data,
			nan_attr_mask);
		if (unlikely(ret)) {
			WL_ERR(("2.4GHz/5GHz rssi proximity threshold set failed\n"));
			goto fail;
		} else {
			nan_buf->count++;
		}

		/* setting nan awake dws */
		ret = wl_cfgnan_set_awake_dws(ndev, cmd_data, nan_iov_data,
			cfg, nan_attr_mask);
		if (unlikely(ret)) {
			WL_ERR(("nan awake dws set failed\n"));
			goto fail;
		} else {
			nan_buf->count++;
		}

		if (cmd_data->disc_ind_cfg) {
			/* Disable events */
			WL_TRACE(("Disable events based on flag\n"));
			ret = wl_cfgnan_config_eventmask(ndev, cfg,
				cmd_data->disc_ind_cfg, false);
			if (unlikely(ret)) {
				goto fail;
			}
		}

		if ((cfg->support_5g) && ((cmd_data->dwell_time[1]) ||
			(cmd_data->scan_period[1]))) {
			/* setting scan params */
			ret = wl_cfgnan_set_nan_scan_params(ndev, cfg,
				cmd_data, cfg->support_5g, nan_attr_mask);
			if (unlikely(ret)) {
				WL_ERR(("scan params set failed for 5g\n"));
				goto fail;
			}
		}
		if ((cmd_data->dwell_time[0]) ||
			(cmd_data->scan_period[0])) {
			ret = wl_cfgnan_set_nan_scan_params(ndev, cfg, cmd_data, 0, nan_attr_mask);
			if (unlikely(ret)) {
				WL_ERR(("scan params set failed for 2g\n"));
				goto fail;
			}
		}
		nan_buf->is_set = true;
		nan_buf_size -= nan_iov_data->nan_iov_len;

		if (nan_buf->count) {
			/* Fire the accumulated batch only if anything was queued */
			memset(resp_buf, 0, sizeof(resp_buf));
			ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
				&(cmd_data->status),
				(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
			if (unlikely(ret) || unlikely(cmd_data->status)) {
				WL_ERR((" nan config handler failed ret = %d status = %d\n",
					ret, cmd_data->status));
				goto fail;
			}
		} else {
			WL_DBG(("No commands to send\n"));
		}

		/* Availability needs bmap, a valid duration and chanspec[0] */
		if ((!cmd_data->bmap) || (cmd_data->avail_params.duration == NAN_BAND_INVALID) ||
			(!cmd_data->chanspec[0])) {
			WL_TRACE(("mandatory arguments are not present to set avail\n"));
			ret = BCME_OK;
		} else {
			cmd_data->avail_params.chanspec[0] = cmd_data->chanspec[0];
			cmd_data->avail_params.bmap = cmd_data->bmap;
			/* 1=local, 2=peer, 3=ndc, 4=immutable, 5=response, 6=counter */
			ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
				cfg, &cmd_data->avail_params, WL_AVAIL_LOCAL);
			if (unlikely(ret)) {
				WL_ERR(("Failed to set avail value with type local\n"));
				goto fail;
			}

			ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
				cfg, &cmd_data->avail_params, WL_AVAIL_NDC);
			if (unlikely(ret)) {
				WL_ERR(("Failed to set avail value with type ndc\n"));
				goto fail;
			}
		}
	} else {
		WL_INFORM(("nan is not enabled\n"));
	}

fail:
	if (nan_buf) {
		MFREE(dhdp->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}
	if (nan_iov_data) {
		MFREE(dhdp->osh, nan_iov_data, sizeof(*nan_iov_data));
	}

	NAN_DBG_EXIT();
	return ret;
}
2673
/*
 * Query NAN support capability. Not implemented yet; unconditionally
 * reports success so callers treat NAN as supported.
 */
int
wl_cfgnan_support_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, nan_config_cmd_data_t *cmd_data)
{
	/* TODO: */
	return BCME_OK;
}
2681
/*
 * Query current NAN status. Not implemented yet; unconditionally
 * returns success without filling cmd_data.
 */
int
wl_cfgnan_status_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, nan_config_cmd_data_t *cmd_data)
{
	/* TODO: */
	return BCME_OK;
}
2689
2690 #ifdef WL_NAN_DISC_CACHE
2691 static
2692 nan_svc_info_t *
2693 wl_cfgnan_get_svc_inst(struct bcm_cfg80211 *cfg,
2694 wl_nan_instance_id svc_inst_id, uint8 ndp_id)
2695 {
2696 uint8 i, j;
2697 if (ndp_id) {
2698 for (i = 0; i < NAN_MAX_SVC_INST; i++) {
2699 for (j = 0; j < NAN_MAX_SVC_INST; j++) {
2700 if (cfg->svc_info[i].ndp_id[j] == ndp_id) {
2701 return &cfg->svc_info[i];
2702 }
2703 }
2704 }
2705 } else if (svc_inst_id) {
2706 for (i = 0; i < NAN_MAX_SVC_INST; i++) {
2707 if (cfg->svc_info[i].svc_id == svc_inst_id) {
2708 return &cfg->svc_info[i];
2709 }
2710 }
2711
2712 }
2713 return NULL;
2714 }
2715
2716 static
2717 nan_ranging_inst_t *
2718 wl_cfgnan_check_for_ranging(struct bcm_cfg80211 *cfg, struct ether_addr *peer)
2719 {
2720 uint8 i;
2721 if (peer) {
2722 for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
2723 if (!memcmp(peer, &cfg->nan_ranging_info[i].peer_addr,
2724 ETHER_ADDR_LEN)) {
2725 return &(cfg->nan_ranging_info[i]);
2726 }
2727 }
2728 }
2729 return NULL;
2730 }
2731
2732 static
2733 nan_ranging_inst_t *
2734 wl_cfgnan_get_ranging_inst(struct bcm_cfg80211 *cfg, struct ether_addr *peer,
2735 uint8 svc_id, bool create)
2736 {
2737 nan_ranging_inst_t *ranging_inst = NULL;
2738 uint8 i;
2739
2740 ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer);
2741 if (ranging_inst) {
2742 goto done;
2743 }
2744 if (create) {
2745 WL_TRACE(("Creating Ranging instance \n"));
2746 for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
2747 if (cfg->nan_ranging_info[i].range_id == 0)
2748 break;
2749 }
2750 if (i == NAN_MAX_RANGING_INST) {
2751 WL_DBG(("No buffer available for the ranging instance"));
2752 goto done;
2753 }
2754 ranging_inst = &cfg->nan_ranging_info[i];
2755 memcpy(&ranging_inst->peer_addr, peer, ETHER_ADDR_LEN);
2756 ranging_inst->range_status = NAN_RANGING_REQUIRED;
2757 ranging_inst->svc_inst_id = svc_id;
2758 }
2759
2760 done:
2761 return ranging_inst;
2762 }
2763 #endif /* WL_NAN_DISC_CACHE */
2764
2765 static int
2766 process_resp_buf(void *iov_resp,
2767 uint8 *instance_id, uint16 sub_cmd_id)
2768 {
2769 int res = BCME_OK;
2770 NAN_DBG_ENTER();
2771
2772 if (sub_cmd_id == WL_NAN_CMD_DATA_DATAREQ) {
2773 wl_nan_dp_req_ret_t *dpreq_ret = NULL;
2774 dpreq_ret = (wl_nan_dp_req_ret_t *)(iov_resp);
2775 *instance_id = dpreq_ret->ndp_id;
2776 WL_TRACE(("%s: Initiator NDI: " MACDBG "\n",
2777 __FUNCTION__, MAC2STRDBG(dpreq_ret->indi.octet)));
2778 } else if (sub_cmd_id == WL_NAN_CMD_RANGE_REQUEST) {
2779 wl_nan_range_id *range_id = NULL;
2780 range_id = (wl_nan_range_id *)(iov_resp);
2781 *instance_id = *range_id;
2782 WL_TRACE(("Range id: %d\n", *range_id));
2783 }
2784 WL_DBG(("instance_id: %d\n", *instance_id));
2785 NAN_DBG_EXIT();
2786 return res;
2787 }
2788
2789 #ifdef WL_NAN_DISC_CACHE
/*
 * Send WL_NAN_CMD_RANGE_CANCEL for the given range_id as a one-command
 * batch iovar. *status receives the firmware status word.
 * Returns BCME_OK only when both the ioctl and the fw status succeed.
 */
static int
wl_cfgnan_cancel_ranging(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, uint8 range_id, uint32 *status)
{
	bcm_iov_batch_buf_t *nan_buf = NULL;
	s32 ret = BCME_OK;
	/* start/end bookkeeping yields the number of bytes actually packed */
	uint16 nan_iov_start, nan_iov_end;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	uint16 subcmd_len;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	wl_nan_iov_t *nan_iov_data = NULL;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];

	dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);

	NAN_DBG_ENTER();

	nan_buf = MALLOCZ(dhdp->osh, nan_buf_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	nan_iov_data = MALLOCZ(dhdp->osh, sizeof(*nan_iov_data));
	if (!nan_iov_data) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	/* Batch header, then the single range-cancel sub-command */
	nan_iov_data->nan_iov_len = nan_iov_start = NAN_IOCTL_BUF_SIZE;
	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
	nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
	sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);

	ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
		sizeof(range_id), &subcmd_len);
	if (unlikely(ret)) {
		WL_ERR(("nan_sub_cmd check failed\n"));
		goto fail;
	}

	sub_cmd->id = htod16(WL_NAN_CMD_RANGE_CANCEL);
	sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(range_id);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);

	/* Reduce the iov_len size by subcmd_len */
	nan_iov_data->nan_iov_len -= subcmd_len;
	nan_iov_end = nan_iov_data->nan_iov_len;
	/* Total bytes to send = header + packed sub-command */
	nan_buf_size = (nan_iov_start - nan_iov_end);

	memcpy(sub_cmd->data, &range_id, sizeof(range_id));

	nan_buf->is_set = true;
	nan_buf->count++;
	memset(resp_buf, 0, sizeof(resp_buf));
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, status,
		(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(*status)) {
		WL_ERR(("Range cancel failed ret %d status %d \n", ret, *status));
		goto fail;
	}
	WL_INFORM(("Range cancel with Range ID [%d] successfull\n", range_id));
fail:
	if (nan_buf) {
		MFREE(dhdp->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}
	if (nan_iov_data) {
		MFREE(dhdp->osh, nan_iov_data, sizeof(*nan_iov_data));
	}
	NAN_DBG_EXIT();
	return ret;
}
2866
2867 static int
2868 wl_cfgnan_cache_svc_info(struct bcm_cfg80211 *cfg,
2869 nan_discover_cmd_data_t *cmd_data, uint16 cmd_id)
2870 {
2871 int ret = BCME_OK;
2872 int i;
2873 nan_svc_info_t *svc_info;
2874
2875 for (i = 0; i < NAN_MAX_SVC_INST; i++) {
2876 if (!cfg->svc_info[i].svc_id) {
2877 svc_info = &cfg->svc_info[i];
2878 break;
2879 }
2880 }
2881 if (i == NAN_MAX_SVC_INST) {
2882 WL_ERR(("%s:cannot accomodate ranging session\n", __FUNCTION__));
2883 ret = BCME_NORESOURCE;
2884 goto fail;
2885 }
2886 if (cmd_data->sde_control_flag & NAN_SDE_CF_RANGING_REQUIRED) {
2887 WL_TRACE(("%s:updating ranging info", __FUNCTION__));
2888 svc_info->status = 1;
2889 svc_info->ranging_interval = cmd_data->ranging_intvl_msec;
2890 svc_info->ranging_ind = cmd_data->ranging_indication;
2891 svc_info->ingress_limit = cmd_data->ingress_limit;
2892 svc_info->egress_limit = cmd_data->egress_limit;
2893 svc_info->ranging_required = 1;
2894 }
2895 if (cmd_id == WL_NAN_CMD_SD_SUBSCRIBE) {
2896 svc_info->svc_id = cmd_data->sub_id;
2897 if ((cmd_data->flags & WL_NAN_SUB_ACTIVE) &&
2898 (cmd_data->tx_match.dlen)) {
2899 memcpy(svc_info->tx_match_filter,
2900 cmd_data->tx_match.data, cmd_data->tx_match.dlen);
2901 svc_info->tx_match_filter_len = cmd_data->tx_match.dlen;
2902 }
2903 } else {
2904 svc_info->svc_id = cmd_data->pub_id;
2905 }
2906 memcpy(svc_info->svc_hash, cmd_data->svc_hash.data, WL_NAN_SVC_HASH_LEN);
2907 fail:
2908 return ret;
2909
2910 }
2911
2912 /* terminate all ranging sessions associated with a svc */
2913 static int
2914 wl_cfgnan_terminate_ranging_sessions(struct net_device *ndev,
2915 struct bcm_cfg80211 *cfg, uint8 svc_id)
2916 {
2917 /* cancel all related ranging instances */
2918 uint8 i;
2919 int ret = BCME_OK;
2920 uint32 status;
2921 nan_ranging_inst_t *ranging_inst;
2922 nan_svc_info_t *svc;
2923 for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
2924 ranging_inst = &cfg->nan_ranging_info[i];
2925 if (ranging_inst->range_id && ranging_inst->svc_inst_id == svc_id) {
2926 ret = wl_cfgnan_cancel_ranging(ndev, cfg, ranging_inst->range_id,
2927 &status);
2928 if (unlikely(ret) || unlikely(status)) {
2929 WL_ERR(("%s:nan range cancel failed ret = %d status = %d\n",
2930 __FUNCTION__, ret, status));
2931 }
2932 memset(ranging_inst, 0, sizeof(nan_ranging_inst_t));
2933 WL_DBG(("Range cancelled \n"));
2934 }
2935 }
2936
2937 /* clear command ranging info */
2938 svc = wl_cfgnan_get_svc_inst(cfg, svc_id, 0);
2939 if (svc) {
2940 WL_DBG(("clearing cached svc info for svc id %d\n", svc_id));
2941 memset(svc, 0, sizeof(*svc));
2942 }
2943 return ret;
2944 }
2945
2946 static int
2947 wl_cfgnan_check_disc_res_for_ranging(struct bcm_cfg80211 *cfg,
2948 nan_event_data_t* nan_event_data)
2949 {
2950 nan_svc_info_t *svc;
2951 int ret = BCME_OK;
2952
2953 svc = wl_cfgnan_get_svc_inst(cfg, nan_event_data->sub_id, 0);
2954
2955 if (svc && svc->ranging_required) {
2956 nan_ranging_inst_t *ranging_inst;
2957 ranging_inst = wl_cfgnan_get_ranging_inst(cfg,
2958 &nan_event_data->remote_nmi, nan_event_data->sub_id, TRUE);
2959 if (ranging_inst->range_status !=
2960 NAN_RANGING_IN_PROGRESS) {
2961 WL_DBG(("Trigger range request\n"));
2962 ret = wl_cfgnan_trigger_ranging(bcmcfg_to_prmry_ndev(cfg),
2963 cfg, ranging_inst, svc, NAN_RANGE_REQ_CMD);
2964 if (unlikely(ret)) {
2965 WL_ERR(("Failed to trigger ranging, ret = (%d)\n", ret));
2966 memset(ranging_inst, 0, sizeof(*ranging_inst));
2967 goto exit;
2968 }
2969 }
2970 /* Disc event will be given on receving range_rpt event */
2971 WL_TRACE(("Disc event will given when Range RPT event is recvd"));
2972 } else {
2973 ret = BCME_UNSUPPORTED;
2974 }
2975 exit:
2976 return ret;
2977 }
2978
2979 /* ranging reqeust event handler */
2980 static int
2981 wl_cfgnan_handle_ranging_ind(struct bcm_cfg80211 *cfg,
2982 wl_nan_ev_rng_req_ind_t *rng_ind)
2983 {
2984 int ret = BCME_OK;
2985 nan_svc_info_t *svc = NULL;
2986 nan_ranging_inst_t *ranging_inst;
2987 uint8 i;
2988
2989 WL_DBG(("Trigger range response\n"));
2990 for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
2991 if (cfg->svc_info[i].ranging_required) {
2992 svc = &cfg->svc_info[i];
2993 }
2994 }
2995 if (!svc) {
2996 /*
2997 * no publisher indicated ranging support,
2998 * ignoring ranging request for now
2999 */
3000 WL_TRACE(("No publisher has ranging supported.so will reject in trigger api"));
3001 ret = BCME_OK;
3002 goto exit;
3003 } else {
3004 ranging_inst = wl_cfgnan_get_ranging_inst(cfg, &rng_ind->peer_m_addr,
3005 svc->svc_id, TRUE);
3006 if (ranging_inst && ranging_inst->range_status != NAN_RANGING_IN_PROGRESS) {
3007 ranging_inst->range_id = rng_ind->rng_id;
3008 ret = wl_cfgnan_trigger_ranging(bcmcfg_to_prmry_ndev(cfg), cfg,
3009 ranging_inst, svc, NAN_RANGE_REQ_EVNT);
3010
3011 if (unlikely(ret)) {
3012 WL_ERR(("Failed to trigger range response, ret = (%d)\n", ret));
3013 memset(ranging_inst, 0, sizeof(*ranging_inst));
3014 goto exit;
3015 }
3016 } else {
3017 WL_INFORM(("Ranging for the peer already in progress"));
3018 }
3019 }
3020
3021 exit:
3022 return ret;
3023 }
3024
/*
 * Ranging request and response iovar handler.
 *
 * Advertises local + ranging availability towards the peer, then issues a
 * batched iovar: WL_NAN_CMD_RANGE_REQUEST when range_cmd is
 * NAN_RANGE_REQ_CMD, otherwise WL_NAN_CMD_RANGE_RESPONSE.  On success the
 * ranging instance is marked NAN_RANGING_IN_PROGRESS to prevent repeated
 * range requests for the same peer.
 *
 * Returns BCME_OK on success; if the iovar fails, the firmware status code
 * is propagated as the return value.
 */
static int
wl_cfgnan_trigger_ranging(struct net_device *ndev, struct bcm_cfg80211 *cfg,
	void *ranging_ctxt, nan_svc_info_t *svc, uint8 range_cmd)
{
	s32 ret = BCME_OK;
	bcm_iov_batch_buf_t *nan_buf = NULL;
	wl_nan_range_req_t *range_req = NULL;
	wl_nan_range_resp_t *range_resp = NULL;
	bcm_iov_batch_subcmd_t *sub_cmd = NULL;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	uint32 status;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE_MED];
	dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
	nan_ranging_inst_t *ranging_inst = (nan_ranging_inst_t *)ranging_ctxt;
	nan_avail_cmd_data cmd_data;

	NAN_DBG_ENTER();

	/* Program availability (local + ranging) for the peer NMI before
	 * triggering the range session.
	 */
	memset(&cmd_data, 0, sizeof(cmd_data));
	memcpy(&cmd_data.peer_nmi, &ranging_inst->peer_addr, ETHER_ADDR_LEN);
	cmd_data.avail_period = NAN_RANGING_PERIOD;
	ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
			cfg, &cmd_data, WL_AVAIL_LOCAL);
	if (unlikely(ret)) {
		WL_ERR(("Failed to set avail value with type [WL_AVAIL_LOCAL]\n"));
		goto fail;
	}

	ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
			cfg, &cmd_data, WL_AVAIL_RANGING);
	if (unlikely(ret)) {
		WL_ERR(("Failed to set avail value with type [WL_AVAIL_RANGING]\n"));
		goto fail;
	}

	nan_buf = MALLOCZ(dhdp->osh, nan_buf_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

	sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
	sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
	if (range_cmd == NAN_RANGE_REQ_CMD) {
		/* Request path: is_set stays false so the assigned range id
		 * can be read back from resp_buf below.
		 */
		sub_cmd->id = htod16(WL_NAN_CMD_RANGE_REQUEST);
		sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(wl_nan_range_req_t);
		range_req = (wl_nan_range_req_t *)(sub_cmd->data);
		/* ranging config */
		range_req->peer = ranging_inst->peer_addr;
		range_req->interval = svc->ranging_interval;
		/* Limits are in cm from host */
		range_req->ingress = (svc->ingress_limit*10);
		range_req->egress = (svc->egress_limit*10);
		range_req->indication = svc->ranging_ind;
	} else {
		/* range response config */
		sub_cmd->id = htod16(WL_NAN_CMD_RANGE_RESPONSE);
		sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(wl_nan_range_resp_t);
		range_resp = (wl_nan_range_resp_t *)(sub_cmd->data);
		range_resp->range_id = ranging_inst->range_id;
		range_resp->status = svc->status;
		nan_buf->is_set = true;
	}

	nan_buf_size -= (sub_cmd->len +
		OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
	nan_buf->count++;

	/* NOTE(review): resp_buf is NAN_IOCTL_BUF_SIZE_MED bytes but
	 * NAN_IOCTL_BUF_SIZE is passed as its capacity -- confirm
	 * NAN_IOCTL_BUF_SIZE_MED >= NAN_IOCTL_BUF_SIZE.
	 */
	memset(resp_buf, 0, sizeof(resp_buf));
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
			&status,
			(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
	if (unlikely(ret) || unlikely(status)) {
		WL_ERR(("nan ranging failed ret = %d status = %d\n",
			ret, status));
		ret = status;
		goto fail;
	}
	WL_TRACE(("nan ranging trigger successful\n"));

	/* check the response buff for request */
	if (range_cmd == NAN_RANGE_REQ_CMD) {
		ret = process_resp_buf(resp_buf + WL_NAN_OBUF_DATA_OFFSET,
				&ranging_inst->range_id, WL_NAN_CMD_RANGE_REQUEST);
		WL_TRACE(("\n ranging instance returned %d\n", ranging_inst->range_id));
	}
	/* Preventing continuous range requests */
	ranging_inst->range_status = NAN_RANGING_IN_PROGRESS;

fail:
	if (nan_buf) {
		MFREE(dhdp->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
	}

	NAN_DBG_EXIT();
	return ret;
}
3128 #endif /* WL_NAN_DISC_CACHE */
3129
3130 static void *wl_nan_bloom_alloc(void *ctx, uint size)
3131 {
3132 uint8 *buf;
3133 BCM_REFERENCE(ctx);
3134
3135 buf = kmalloc(size, GFP_KERNEL);
3136 if (!buf) {
3137 WL_ERR(("%s: memory allocation failed\n", __func__));
3138 buf = NULL;
3139 }
3140 return buf;
3141 }
3142
3143 static void wl_nan_bloom_free(void *ctx, void *buf, uint size)
3144 {
3145 BCM_REFERENCE(ctx);
3146 BCM_REFERENCE(size);
3147 if (buf) {
3148 kfree(buf);
3149 }
3150 }
3151
3152 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
3153 #pragma GCC diagnostic ignored "-Wcast-qual"
3154 #endif // endif
3155 static uint wl_nan_hash(void *ctx, uint index, const uint8 *input, uint input_len)
3156 {
3157 uint8* filter_idx = (uint8*)ctx;
3158 uint8 i = (*filter_idx * WL_NAN_HASHES_PER_BLOOM) + (uint8)index;
3159 uint b = 0;
3160
3161 /* Steps 1 and 2 as explained in Section 6.2 */
3162 /* Concatenate index to input and run CRC32 by calling hndcrc32 twice */
3163 b = hndcrc32(&i, sizeof(uint8), CRC32_INIT_VALUE);
3164 b = hndcrc32((uint8*)input, input_len, b);
3165 /* Obtain the last 2 bytes of the CRC32 output */
3166 b &= NAN_BLOOM_CRC32_MASK;
3167
3168 /* Step 3 is completed by bcmbloom functions */
3169 return b;
3170 }
3171
/*
 * Create a bloom filter of the given size and register
 * WL_NAN_HASHES_PER_BLOOM instances of wl_nan_hash() on it.
 * idx (the filter index) is used as the allocator context.
 *
 * Returns BCME_OK or the first bcmbloom error encountered.
 *
 * NOTE(review): &i -- a stack local -- is passed as the per-hash context;
 * if bcm_bloom_add_hash() stores the pointer rather than copying the
 * value, the context dangles once this function returns.  Confirm against
 * the bcmbloom implementation.
 */
static int wl_nan_bloom_create(bcm_bloom_filter_t **bp, uint *idx, uint size)
{
	uint i;
	int err;

	err = bcm_bloom_create(wl_nan_bloom_alloc, wl_nan_bloom_free,
		idx, WL_NAN_HASHES_PER_BLOOM, size, bp);
	if (err != BCME_OK) {
		goto exit;
	}

	/* Populate bloom filter with hash functions */
	for (i = 0; i < WL_NAN_HASHES_PER_BLOOM; i++) {
		err = bcm_bloom_add_hash(*bp, wl_nan_hash, &i);
		if (err) {
			WL_ERR(("bcm_bloom_add_hash failed\n"));
			goto exit;
		}
	}
exit:
	return err;
}
3194
3195 static int
3196 wl_cfgnan_sd_params_handler(struct net_device *ndev,
3197 nan_discover_cmd_data_t *cmd_data, uint16 cmd_id,
3198 void *p_buf, uint16 *nan_buf_size)
3199 {
3200 s32 ret = BCME_OK;
3201 uint8 *pxtlv, *srf = NULL, *srf_mac = NULL, *srftmp = NULL;
3202 uint16 buflen_avail;
3203 bcm_iov_batch_subcmd_t *sub_cmd = (bcm_iov_batch_subcmd_t*)(p_buf);
3204 wl_nan_sd_params_t *sd_params = (wl_nan_sd_params_t *)sub_cmd->data;
3205 uint16 srf_size = 0;
3206 uint bloom_size, a;
3207 bcm_bloom_filter_t *bp = NULL;
3208 /* Bloom filter index default, indicates it has not been set */
3209 uint bloom_idx = 0xFFFFFFFF;
3210 uint16 bloom_len = NAN_BLOOM_LENGTH_DEFAULT;
3211 /* srf_ctrl_size = bloom_len + src_control field */
3212 uint16 srf_ctrl_size = bloom_len + 1;
3213
3214 dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
3215 struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
3216 BCM_REFERENCE(cfg);
3217
3218 NAN_DBG_ENTER();
3219
3220 if (cmd_data->period) {
3221 sd_params->awake_dw = cmd_data->period;
3222 }
3223 sd_params->period = 1;
3224
3225 if (cmd_data->ttl) {
3226 sd_params->ttl = cmd_data->ttl;
3227 } else {
3228 sd_params->ttl = WL_NAN_TTL_UNTIL_CANCEL;
3229 }
3230
3231 sd_params->flags = 0;
3232 sd_params->flags = cmd_data->flags;
3233
3234 /* Nan Service Based event suppression Flags */
3235 if (cmd_data->recv_ind_flag) {
3236 /* BIT0 - If set, host wont rec event "terminated" */
3237 if (CHECK_BIT(cmd_data->recv_ind_flag, WL_NAN_EVENT_SUPPRESS_TERMINATE_BIT)) {
3238 sd_params->flags |= WL_NAN_SVC_CTRL_SUPPRESS_EVT_TERMINATED;
3239 }
3240
3241 /* BIT1 - If set, host wont receive match expiry evt */
3242 /* TODO: Exp not yet supported */
3243 if (CHECK_BIT(cmd_data->recv_ind_flag, WL_NAN_EVENT_SUPPRESS_MATCH_EXP_BIT)) {
3244 WL_DBG(("Need to add match expiry event\n"));
3245 }
3246 /* BIT2 - If set, host wont rec event "receive" */
3247 if (CHECK_BIT(cmd_data->recv_ind_flag, WL_NAN_EVENT_SUPPRESS_RECEIVE_BIT)) {
3248 sd_params->flags |= WL_NAN_SVC_CTRL_SUPPRESS_EVT_RECEIVE;
3249 }
3250 /* BIT3 - If set, host wont rec event "replied" */
3251 if (CHECK_BIT(cmd_data->recv_ind_flag, WL_NAN_EVENT_SUPPRESS_REPLIED_BIT)) {
3252 sd_params->flags |= WL_NAN_SVC_CTRL_SUPPRESS_EVT_REPLIED;
3253 }
3254 }
3255 if (cmd_id == WL_NAN_CMD_SD_PUBLISH) {
3256 sd_params->instance_id = cmd_data->pub_id;
3257 if (cmd_data->service_responder_policy) {
3258 /* Do not disturb avail if dam is supported */
3259 if (FW_SUPPORTED(dhdp, autodam)) {
3260 /* Nan Accept policy: Per service basis policy
3261 * Based on this policy(ALL/NONE), responder side
3262 * will send ACCEPT/REJECT
3263 * If set, auto datapath responder will be sent by FW
3264 */
3265 sd_params->flags |= WL_NAN_SVC_CTRL_AUTO_DPRESP;
3266 } else {
3267 WL_ERR(("svc specifiv auto dp resp is not"
3268 " supported in non-auto dam fw\n"));
3269 }
3270 }
3271 } else if (cmd_id == WL_NAN_CMD_SD_SUBSCRIBE) {
3272 sd_params->instance_id = cmd_data->sub_id;
3273 } else {
3274 ret = BCME_USAGE_ERROR;
3275 WL_ERR(("wrong command id = %d \n", cmd_id));
3276 goto fail;
3277 }
3278
3279 if ((cmd_data->svc_hash.dlen == WL_NAN_SVC_HASH_LEN) &&
3280 (cmd_data->svc_hash.data)) {
3281 memcpy((uint8*)sd_params->svc_hash, cmd_data->svc_hash.data,
3282 cmd_data->svc_hash.dlen);
3283 #ifdef WL_NAN_DEBUG
3284 prhex("hashed svc name", cmd_data->svc_hash.data,
3285 cmd_data->svc_hash.dlen);
3286 #endif /* WL_NAN_DEBUG */
3287 } else {
3288 ret = BCME_ERROR;
3289 WL_ERR(("invalid svc hash data or length = %d\n",
3290 cmd_data->svc_hash.dlen));
3291 goto fail;
3292 }
3293
3294 /* check if ranging support is present in firmware */
3295 if ((cmd_data->sde_control_flag & NAN_SDE_CF_RANGING_REQUIRED) &&
3296 !FW_SUPPORTED(dhdp, nanrange)) {
3297 WL_ERR(("Service requires ranging but fw doesnt support it\n"));
3298 ret = BCME_UNSUPPORTED;
3299 goto fail;
3300 }
3301
3302 /* Optional parameters: fill the sub_command block with service descriptor attr */
3303 sub_cmd->id = htod16(cmd_id);
3304 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
3305 sub_cmd->len = sizeof(sub_cmd->u.options) +
3306 OFFSETOF(wl_nan_sd_params_t, optional[0]);
3307 pxtlv = (uint8*)&sd_params->optional[0];
3308
3309 *nan_buf_size -= sub_cmd->len;
3310 buflen_avail = *nan_buf_size;
3311
3312 if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
3313 WL_TRACE(("optional svc_info present, pack it\n"));
3314 ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
3315 WL_NAN_XTLV_SD_SVC_INFO,
3316 cmd_data->svc_info.dlen,
3317 cmd_data->svc_info.data, BCM_XTLV_OPTION_ALIGN32);
3318 if (unlikely(ret)) {
3319 WL_ERR(("%s: fail to pack WL_NAN_XTLV_SD_SVC_INFO\n", __FUNCTION__));
3320 goto fail;
3321 }
3322 }
3323
3324 if (cmd_data->sde_svc_info.data && cmd_data->sde_svc_info.dlen) {
3325 WL_TRACE(("optional sdea svc_info present, pack it, %d\n",
3326 cmd_data->sde_svc_info.dlen));
3327 ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
3328 WL_NAN_XTLV_SD_SDE_SVC_INFO,
3329 cmd_data->sde_svc_info.dlen,
3330 cmd_data->sde_svc_info.data, BCM_XTLV_OPTION_ALIGN32);
3331 if (unlikely(ret)) {
3332 WL_ERR(("%s: fail to pack sdea svc info\n", __FUNCTION__));
3333 goto fail;
3334 }
3335 }
3336
3337 if (cmd_data->tx_match.dlen) {
3338 WL_TRACE(("optional tx match filter presnet (len=%d)\n",
3339 cmd_data->tx_match.dlen));
3340 ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
3341 WL_NAN_XTLV_CFG_MATCH_TX, cmd_data->tx_match.dlen,
3342 cmd_data->tx_match.data, BCM_XTLV_OPTION_ALIGN32);
3343 if (unlikely(ret)) {
3344 WL_ERR(("%s: failed on xtlv_pack for tx match filter\n", __FUNCTION__));
3345 goto fail;
3346 }
3347 }
3348
3349 if (cmd_data->life_count) {
3350 WL_TRACE(("optional life count is present, pack it\n"));
3351 ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size, WL_NAN_XTLV_CFG_SVC_LIFE_COUNT,
3352 sizeof(cmd_data->life_count), &cmd_data->life_count,
3353 BCM_XTLV_OPTION_ALIGN32);
3354 if (unlikely(ret)) {
3355 WL_ERR(("%s: failed to WL_NAN_XTLV_CFG_SVC_LIFE_COUNT\n", __FUNCTION__));
3356 goto fail;
3357 }
3358 }
3359
3360 if (cmd_data->use_srf) {
3361 uint8 srf_control = 0;
3362 /* set include bit */
3363 if (cmd_data->srf_include == true) {
3364 srf_control |= 0x2;
3365 }
3366
3367 if (!ETHER_ISNULLADDR(&cmd_data->mac_list.list) &&
3368 (cmd_data->mac_list.num_mac_addr
3369 < NAN_SRF_MAX_MAC)) {
3370 if (cmd_data->srf_type == SRF_TYPE_SEQ_MAC_ADDR) {
3371 /* mac list */
3372 srf_size = (cmd_data->mac_list.num_mac_addr
3373 * ETHER_ADDR_LEN) + NAN_SRF_CTRL_FIELD_LEN;
3374 WL_TRACE(("srf size = %d\n", srf_size));
3375
3376 srf_mac = MALLOCZ(dhdp->osh, srf_size);
3377 if (srf_mac == NULL) {
3378 WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
3379 ret = -ENOMEM;
3380 goto fail;
3381 }
3382 memcpy(srf_mac, &srf_control, NAN_SRF_CTRL_FIELD_LEN);
3383 memcpy(srf_mac+1, cmd_data->mac_list.list,
3384 (srf_size - NAN_SRF_CTRL_FIELD_LEN));
3385 ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
3386 WL_NAN_XTLV_CFG_SR_FILTER, srf_size, srf_mac,
3387 BCM_XTLV_OPTION_ALIGN32);
3388 if (unlikely(ret)) {
3389 WL_ERR(("%s: failed to WL_NAN_XTLV_CFG_SR_FILTER\n",
3390 __FUNCTION__));
3391 goto fail;
3392 }
3393 } else if (cmd_data->srf_type == SRF_TYPE_BLOOM_FILTER) {
3394 /* Create bloom filter */
3395 srf = MALLOCZ(dhdp->osh, srf_ctrl_size);
3396 if (srf == NULL) {
3397 WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
3398 ret = -ENOMEM;
3399 goto fail;
3400 }
3401 /* Bloom filter */
3402 srf_control |= 0x1;
3403 /* Instance id must be from 0 to 254, 255 is vendor specific */
3404 if (sd_params->instance_id <= NAN_ID_MIN ||
3405 sd_params->instance_id > (NAN_ID_MAX - 1)) {
3406 WL_ERR(("Invalid instance id\n"));
3407 ret = BCME_BADARG;
3408 goto fail;
3409 }
3410 if (bloom_idx == 0xFFFFFFFF) {
3411 bloom_idx = sd_params->instance_id % 4;
3412 } else {
3413 WL_ERR(("Invalid bloom_idx\n"));
3414 ret = BCME_BADARG;
3415 goto fail;
3416
3417 }
3418 srf_control |= bloom_idx << 2;
3419
3420 ret = wl_nan_bloom_create(&bp, &bloom_idx, bloom_len);
3421 if (unlikely(ret)) {
3422 WL_ERR(("%s: Bloom create failed\n", __FUNCTION__));
3423 goto fail;
3424 }
3425
3426 srftmp = cmd_data->mac_list.list;
3427 for (a = 0;
3428 a < cmd_data->mac_list.num_mac_addr; a++) {
3429 ret = bcm_bloom_add_member(bp, srftmp, ETHER_ADDR_LEN);
3430 if (unlikely(ret)) {
3431 WL_ERR(("%s: Cannot add to bloom filter\n",
3432 __FUNCTION__));
3433 goto fail;
3434 }
3435 srftmp += ETHER_ADDR_LEN;
3436 }
3437
3438 memcpy(srf, &srf_control, NAN_SRF_CTRL_FIELD_LEN);
3439 ret = bcm_bloom_get_filter_data(bp, bloom_len,
3440 (srf + NAN_SRF_CTRL_FIELD_LEN),
3441 &bloom_size);
3442 if (unlikely(ret)) {
3443 WL_ERR(("%s: Cannot get filter data\n", __FUNCTION__));
3444 goto fail;
3445 }
3446 ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
3447 WL_NAN_XTLV_CFG_SR_FILTER, srf_ctrl_size,
3448 srf, BCM_XTLV_OPTION_ALIGN32);
3449 if (ret != BCME_OK) {
3450 goto fail;
3451 }
3452 } else {
3453 WL_ERR(("Invalid SRF Type = %d !!!\n",
3454 cmd_data->srf_type));
3455 goto fail;
3456 }
3457 } else {
3458 WL_ERR(("Invalid MAC Addr/Too many mac addr = %d !!!\n",
3459 cmd_data->mac_list.num_mac_addr));
3460 goto fail;
3461 }
3462 }
3463
3464 if (cmd_data->rx_match.dlen) {
3465 WL_TRACE(("optional rx match filter is present, pack it\n"));
3466 ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
3467 WL_NAN_XTLV_CFG_MATCH_RX, cmd_data->rx_match.dlen,
3468 cmd_data->rx_match.data, BCM_XTLV_OPTION_ALIGN32);
3469 if (unlikely(ret)) {
3470 WL_ERR(("%s: failed on xtlv_pack for rx match filter\n", __func__));
3471 goto fail;
3472 }
3473 }
3474
3475 /* Security elements */
3476 if (cmd_data->csid) {
3477 WL_TRACE(("Cipher suite type is present, pack it\n"));
3478 ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
3479 WL_NAN_XTLV_CFG_SEC_CSID, sizeof(nan_sec_csid_e),
3480 (uint8*)&cmd_data->csid, BCM_XTLV_OPTION_ALIGN32);
3481 if (unlikely(ret)) {
3482 WL_ERR(("%s: fail to pack on csid\n", __FUNCTION__));
3483 goto fail;
3484 }
3485 }
3486
3487 if (cmd_data->ndp_cfg.security_cfg) {
3488 if ((cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PMK) ||
3489 (cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PASSPHRASE)) {
3490 if (cmd_data->key.data && cmd_data->key.dlen) {
3491 WL_TRACE(("optional pmk present, pack it\n"));
3492 ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
3493 WL_NAN_XTLV_CFG_SEC_PMK, cmd_data->key.dlen,
3494 cmd_data->key.data, BCM_XTLV_OPTION_ALIGN32);
3495 if (unlikely(ret)) {
3496 WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SEC_PMK\n",
3497 __FUNCTION__));
3498 goto fail;
3499 }
3500 }
3501 } else {
3502 WL_ERR(("Invalid security key type\n"));
3503 ret = BCME_BADARG;
3504 goto fail;
3505 }
3506 }
3507
3508 if (cmd_data->scid.data && cmd_data->scid.dlen) {
3509 WL_TRACE(("optional scid present, pack it\n"));
3510 ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size, WL_NAN_XTLV_CFG_SEC_SCID,
3511 cmd_data->scid.dlen, cmd_data->scid.data, BCM_XTLV_OPTION_ALIGN32);
3512 if (unlikely(ret)) {
3513 WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SEC_SCID\n", __FUNCTION__));
3514 goto fail;
3515 }
3516 }
3517
3518 if (cmd_data->sde_control_flag) {
3519 ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
3520 WL_NAN_XTLV_SD_SDE_CONTROL,
3521 sizeof(uint16), (uint8*)&cmd_data->sde_control_flag,
3522 BCM_XTLV_OPTION_ALIGN32);
3523 if (ret != BCME_OK) {
3524 WL_ERR(("%s: fail to pack WL_NAN_XTLV_SD_SDE_CONTROL\n", __FUNCTION__));
3525 goto fail;
3526 }
3527 }
3528
3529 sub_cmd->len += (buflen_avail - *nan_buf_size);
3530
3531 fail:
3532 if (srf) {
3533 MFREE(dhdp->osh, srf, srf_ctrl_size);
3534 }
3535
3536 if (srf_mac) {
3537 MFREE(dhdp->osh, srf_mac, srf_size);
3538 }
3539 NAN_DBG_EXIT();
3540 return ret;
3541 }
3542
3543 static int
3544 wl_cfgnan_aligned_data_size_of_opt_disc_params(uint16 *data_size, nan_discover_cmd_data_t *cmd_data)
3545 {
3546 s32 ret = BCME_OK;
3547 if (cmd_data->svc_info.dlen)
3548 *data_size += ALIGN_SIZE(cmd_data->svc_info.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
3549 if (cmd_data->sde_svc_info.dlen)
3550 *data_size += ALIGN_SIZE(cmd_data->sde_svc_info.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
3551 if (cmd_data->tx_match.dlen)
3552 *data_size += ALIGN_SIZE(cmd_data->tx_match.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
3553 if (cmd_data->rx_match.dlen)
3554 *data_size += ALIGN_SIZE(cmd_data->rx_match.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
3555 if (cmd_data->use_srf) {
3556 if (cmd_data->srf_type == SRF_TYPE_SEQ_MAC_ADDR) {
3557 *data_size += (cmd_data->mac_list.num_mac_addr * ETHER_ADDR_LEN)
3558 + NAN_SRF_CTRL_FIELD_LEN;
3559 } else { /* Bloom filter type */
3560 *data_size += NAN_BLOOM_LENGTH_DEFAULT + 1;
3561 }
3562 *data_size += ALIGN_SIZE(*data_size + NAN_XTLV_ID_LEN_SIZE, 4);
3563 }
3564 if (cmd_data->csid)
3565 *data_size += ALIGN_SIZE(sizeof(nan_sec_csid_e) + NAN_XTLV_ID_LEN_SIZE, 4);
3566 if (cmd_data->key.dlen)
3567 *data_size += ALIGN_SIZE(cmd_data->key.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
3568 if (cmd_data->scid.dlen)
3569 *data_size += ALIGN_SIZE(cmd_data->scid.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
3570 if (cmd_data->sde_control_flag)
3571 *data_size += ALIGN_SIZE(sizeof(uint16) + NAN_XTLV_ID_LEN_SIZE, 4);
3572 if (cmd_data->life_count)
3573 *data_size += ALIGN_SIZE(sizeof(cmd_data->life_count) + NAN_XTLV_ID_LEN_SIZE, 4);
3574 return ret;
3575 }
3576
3577 static int
3578 wl_cfgnan_aligned_data_size_of_opt_dp_params(uint16 *data_size, nan_datapath_cmd_data_t *cmd_data)
3579 {
3580 s32 ret = BCME_OK;
3581 if (cmd_data->svc_info.dlen)
3582 *data_size += ALIGN_SIZE(cmd_data->svc_info.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
3583 if (cmd_data->key.dlen)
3584 *data_size += ALIGN_SIZE(cmd_data->key.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
3585 if (cmd_data->csid)
3586 *data_size += ALIGN_SIZE(sizeof(nan_sec_csid_e) + NAN_XTLV_ID_LEN_SIZE, 4);
3587
3588 *data_size += ALIGN_SIZE(WL_NAN_SVC_HASH_LEN + NAN_XTLV_ID_LEN_SIZE, 4);
3589 return ret;
3590 }
3591 int
3592 wl_cfgnan_svc_get_handler(struct net_device *ndev,
3593 struct bcm_cfg80211 *cfg, uint16 cmd_id, nan_discover_cmd_data_t *cmd_data)
3594 {
3595 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
3596 uint32 instance_id;
3597 s32 ret = BCME_OK;
3598 bcm_iov_batch_buf_t *nan_buf = NULL;
3599
3600 uint8 *resp_buf = NULL;
3601 uint16 data_size = WL_NAN_OBUF_DATA_OFFSET + sizeof(instance_id);
3602
3603 NAN_DBG_ENTER();
3604
3605 nan_buf = MALLOCZ(cfg->osh, data_size);
3606 if (!nan_buf) {
3607 WL_ERR(("%s: memory allocation failed\n", __func__));
3608 ret = BCME_NOMEM;
3609 goto fail;
3610 }
3611
3612 resp_buf = MALLOCZ(cfg->osh, NAN_IOCTL_BUF_SIZE_LARGE);
3613 if (!resp_buf) {
3614 WL_ERR(("%s: memory allocation failed\n", __func__));
3615 ret = BCME_NOMEM;
3616 goto fail;
3617 }
3618 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
3619 nan_buf->count = 1;
3620 /* check if service is present */
3621 nan_buf->is_set = false;
3622 sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
3623 if (cmd_id == WL_NAN_CMD_SD_PUBLISH) {
3624 instance_id = cmd_data->pub_id;
3625 } else if (cmd_id == WL_NAN_CMD_SD_SUBSCRIBE) {
3626 instance_id = cmd_data->sub_id;
3627 } else {
3628 ret = BCME_USAGE_ERROR;
3629 WL_ERR(("wrong command id = %u\n", cmd_id));
3630 goto fail;
3631 }
3632 /* Fill the sub_command block */
3633 sub_cmd->id = htod16(cmd_id);
3634 sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(instance_id);
3635 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
3636 memcpy(sub_cmd->data, &instance_id, sizeof(instance_id));
3637 ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
3638 &(cmd_data->status), resp_buf, NAN_IOCTL_BUF_SIZE_LARGE);
3639
3640 if (unlikely(ret) || unlikely(cmd_data->status)) {
3641 WL_ERR(("nan svc check failed ret = %d status = %d\n", ret, cmd_data->status));
3642 goto fail;
3643 } else {
3644 WL_DBG(("nan svc check successful..proceed to update\n"));
3645 }
3646
3647 fail:
3648 if (nan_buf) {
3649 MFREE(cfg->osh, nan_buf, data_size);
3650 }
3651
3652 if (resp_buf) {
3653 MFREE(cfg->osh, resp_buf, NAN_IOCTL_BUF_SIZE_LARGE);
3654 }
3655 NAN_DBG_EXIT();
3656 return ret;
3657
3658 }
3659
/*
 * Common publish/subscribe handler.  For a service update the instance is
 * first verified via wl_cfgnan_svc_get_handler() (a WL_NAN_E_BAD_INSTANCE
 * status aborts the update).  The iovar buffer is then sized for the
 * fixed params plus all optional TLVs, filled by
 * wl_cfgnan_sd_params_handler(), and issued to the firmware.  Newly
 * created services are cached when WL_NAN_DISC_CACHE is enabled.
 *
 * Returns BCME_OK on success; firmware status is reported through
 * cmd_data->status.
 */
int
wl_cfgnan_svc_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, uint16 cmd_id, nan_discover_cmd_data_t *cmd_data)
{
	s32 ret = BCME_OK;
	bcm_iov_batch_buf_t *nan_buf = NULL;
	uint16 nan_buf_size;
	uint8 *resp_buf = NULL;
	/* Considering fixed params */
	uint16 data_size = WL_NAN_OBUF_DATA_OFFSET +
		OFFSETOF(wl_nan_sd_params_t, optional[0]);

	if (cmd_data->svc_update) {
		/* Verify the service exists before attempting an update */
		ret = wl_cfgnan_svc_get_handler(ndev, cfg, cmd_id, cmd_data);
		if (ret != BCME_OK) {
			goto fail;
		} else {
			/* Ignoring any other svc get error */
			if (cmd_data->status == WL_NAN_E_BAD_INSTANCE) {
				goto fail;
			}
		}
	}

	/* Account for every optional TLV so the batch buffer is sized exactly */
	ret = wl_cfgnan_aligned_data_size_of_opt_disc_params(&data_size, cmd_data);
	if (unlikely(ret)) {
		WL_ERR(("Failed to get alligned size of optional params\n"));
		goto fail;
	}
	nan_buf_size = data_size;
	NAN_DBG_ENTER();

	nan_buf = MALLOCZ(cfg->osh, data_size);
	if (!nan_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}

	resp_buf = MALLOCZ(cfg->osh, data_size + NAN_IOVAR_NAME_SIZE);
	if (!resp_buf) {
		WL_ERR(("%s: memory allocation failed\n", __func__));
		ret = BCME_NOMEM;
		goto fail;
	}
	nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
	nan_buf->count = 0;
	nan_buf->is_set = true;

	/* Fill in the publish/subscribe sub-command and its optional TLVs */
	ret = wl_cfgnan_sd_params_handler(ndev, cmd_data, cmd_id,
			&nan_buf->cmds[0], &nan_buf_size);
	if (unlikely(ret)) {
		WL_ERR((" Service discovery params handler failed, ret = %d\n", ret));
		goto fail;
	}

	nan_buf->count++;
	ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
			&(cmd_data->status), resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
	if (cmd_data->svc_update && (cmd_data->status == BCME_DATA_NOTFOUND)) {
		/* return OK if update tlv data is not present
		 * which means nothing to update
		 */
		cmd_data->status = BCME_OK;
	}
	if (unlikely(ret) || unlikely(cmd_data->status)) {
		WL_ERR(("nan svc failed ret = %d status = %d\n", ret, cmd_data->status));
		goto fail;
	} else {
		WL_DBG(("nan svc successful\n"));
#ifdef WL_NAN_DISC_CACHE
		if (!cmd_data->svc_update) { /* cache new service */
			ret = wl_cfgnan_cache_svc_info(cfg, cmd_data, cmd_id);
			if (ret < 0) {
				WL_ERR(("%s: fail to cache svc info, ret=%d\n",
					__FUNCTION__, ret));
				goto fail;
			}
		} else {
			WL_DBG(("skipping caching for update of svc %d\n", cmd_id));
		}
#endif /* WL_NAN_DISC_CACHE */
	}

fail:
	if (nan_buf) {
		MFREE(cfg->osh, nan_buf, data_size);
	}

	if (resp_buf) {
		MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
	}
	NAN_DBG_EXIT();
	return ret;
}
3755
3756 int
3757 wl_cfgnan_publish_handler(struct net_device *ndev,
3758 struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
3759 {
3760 int ret = BCME_OK;
3761 NAN_DBG_ENTER();
3762 NAN_MUTEX_LOCK();
3763
3764 /*
3765 * proceed only if mandatory arguments are present - subscriber id,
3766 * service hash
3767 */
3768 if ((!cmd_data->pub_id) || (!cmd_data->svc_hash.data) ||
3769 (!cmd_data->svc_hash.dlen)) {
3770 WL_ERR(("mandatory arguments are not present\n"));
3771 ret = BCME_BADARG;
3772 goto fail;
3773 }
3774
3775 ret = wl_cfgnan_svc_handler(ndev, cfg, WL_NAN_CMD_SD_PUBLISH, cmd_data);
3776 if (ret < 0) {
3777 WL_ERR(("%s: fail to handle pub, ret=%d\n", __FUNCTION__, ret));
3778 goto fail;
3779 }
3780 WL_INFORM_MEM(("[NAN] Service published for instance id:%d\n", cmd_data->pub_id));
3781
3782 fail:
3783 NAN_MUTEX_UNLOCK();
3784 NAN_DBG_EXIT();
3785 return ret;
3786 }
3787
3788 int
3789 wl_cfgnan_subscribe_handler(struct net_device *ndev,
3790 struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
3791 {
3792 int ret = BCME_OK;
3793 NAN_DBG_ENTER();
3794 NAN_MUTEX_LOCK();
3795
3796 /*
3797 * proceed only if mandatory arguments are present - subscriber id,
3798 * service hash
3799 */
3800 if ((!cmd_data->sub_id) || (!cmd_data->svc_hash.data) ||
3801 (!cmd_data->svc_hash.dlen)) {
3802 WL_ERR(("mandatory arguments are not present\n"));
3803 ret = BCME_BADARG;
3804 goto fail;
3805 }
3806
3807 ret = wl_cfgnan_svc_handler(ndev, cfg, WL_NAN_CMD_SD_SUBSCRIBE, cmd_data);
3808 if (ret < 0) {
3809 WL_ERR(("%s: fail to handle svc, ret=%d\n", __FUNCTION__, ret));
3810 goto fail;
3811 }
3812 WL_INFORM_MEM(("[NAN] Service subscribed for instance id:%d\n", cmd_data->sub_id));
3813
3814 fail:
3815 NAN_MUTEX_UNLOCK();
3816 NAN_DBG_EXIT();
3817 return ret;
3818 }
3819
3820 static int
3821 wl_cfgnan_cancel_handler(nan_discover_cmd_data_t *cmd_data,
3822 uint16 cmd_id, void *p_buf, uint16 *nan_buf_size)
3823 {
3824 s32 ret = BCME_OK;
3825
3826 NAN_DBG_ENTER();
3827
3828 if (p_buf != NULL) {
3829 bcm_iov_batch_subcmd_t *sub_cmd = (bcm_iov_batch_subcmd_t*)(p_buf);
3830 wl_nan_instance_id_t instance_id;
3831
3832 if (cmd_id == WL_NAN_CMD_SD_CANCEL_PUBLISH) {
3833 instance_id = cmd_data->pub_id;
3834 } else if (cmd_id == WL_NAN_CMD_SD_CANCEL_SUBSCRIBE) {
3835 instance_id = cmd_data->sub_id;
3836 } else {
3837 ret = BCME_USAGE_ERROR;
3838 WL_ERR(("wrong command id = %u\n", cmd_id));
3839 goto fail;
3840 }
3841
3842 /* Fill the sub_command block */
3843 sub_cmd->id = htod16(cmd_id);
3844 sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(instance_id);
3845 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
3846 memcpy(sub_cmd->data, &instance_id, sizeof(instance_id));
3847 /* adjust iov data len to the end of last data record */
3848 *nan_buf_size -= (sub_cmd->len +
3849 OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
3850 WL_INFORM_MEM(("[NAN] Service with instance id:%d cancelled\n", instance_id));
3851 } else {
3852 WL_ERR(("nan_iov_buf is NULL\n"));
3853 ret = BCME_ERROR;
3854 goto fail;
3855 }
3856
3857 fail:
3858 NAN_DBG_EXIT();
3859 return ret;
3860 }
3861
3862 int
3863 wl_cfgnan_cancel_pub_handler(struct net_device *ndev,
3864 struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
3865 {
3866 bcm_iov_batch_buf_t *nan_buf = NULL;
3867 s32 ret = BCME_OK;
3868 uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
3869 uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
3870 dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
3871
3872 NAN_DBG_ENTER();
3873 NAN_MUTEX_LOCK();
3874
3875 nan_buf = MALLOCZ(dhdp->osh, nan_buf_size);
3876 if (!nan_buf) {
3877 WL_ERR(("%s: memory allocation failed\n", __func__));
3878 ret = BCME_NOMEM;
3879 goto fail;
3880 }
3881
3882 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
3883 nan_buf->count = 0;
3884 nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
3885
3886 /* proceed only if mandatory argument is present - publisher id */
3887 if (!cmd_data->pub_id) {
3888 WL_ERR(("mandatory argument is not present\n"));
3889 ret = BCME_BADARG;
3890 goto fail;
3891 }
3892
3893 #ifdef WL_NAN_DISC_CACHE
3894 /* terminate ranging sessions for this svc */
3895 wl_cfgnan_terminate_ranging_sessions(ndev, cfg, cmd_data->pub_id);
3896 #endif /* WL_NAN_DISC_CACHE */
3897 ret = wl_cfgnan_cancel_handler(cmd_data, WL_NAN_CMD_SD_CANCEL_PUBLISH,
3898 &nan_buf->cmds[0], &nan_buf_size);
3899 if (unlikely(ret)) {
3900 WL_ERR(("cancel publish failed\n"));
3901 goto fail;
3902 }
3903 nan_buf->is_set = true;
3904 nan_buf->count++;
3905
3906 memset(resp_buf, 0, sizeof(resp_buf));
3907 ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
3908 &(cmd_data->status),
3909 (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
3910 if (unlikely(ret) || unlikely(cmd_data->status)) {
3911 WL_ERR(("nan cancel publish failed ret = %d status = %d\n",
3912 ret, cmd_data->status));
3913 goto fail;
3914 }
3915 WL_DBG(("nan cancel publish successfull\n"));
3916 fail:
3917 if (nan_buf) {
3918 MFREE(dhdp->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
3919 }
3920
3921 NAN_MUTEX_UNLOCK();
3922 NAN_DBG_EXIT();
3923 return ret;
3924 }
3925
3926 int
3927 wl_cfgnan_cancel_sub_handler(struct net_device *ndev,
3928 struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
3929 {
3930 bcm_iov_batch_buf_t *nan_buf = NULL;
3931 s32 ret = BCME_OK;
3932 uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
3933 uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
3934 dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
3935
3936 NAN_DBG_ENTER();
3937 NAN_MUTEX_LOCK();
3938
3939 nan_buf = MALLOCZ(dhdp->osh, nan_buf_size);
3940 if (!nan_buf) {
3941 WL_ERR(("%s: memory allocation failed\n", __func__));
3942 ret = BCME_NOMEM;
3943 goto fail;
3944 }
3945
3946 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
3947 nan_buf->count = 0;
3948 nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
3949
3950 /* proceed only if mandatory argument is present - subscriber id */
3951 if (!cmd_data->sub_id) {
3952 WL_ERR(("mandatory argument is not present\n"));
3953 ret = BCME_BADARG;
3954 goto fail;
3955 }
3956
3957 #ifdef WL_NAN_DISC_CACHE
3958 /* terminate ranging sessions for this svc */
3959 wl_cfgnan_terminate_ranging_sessions(ndev, cfg, cmd_data->sub_id);
3960 wl_cfgnan_remove_disc_result(cfg, cmd_data->sub_id);
3961 #endif /* WL_NAN_DISC_CACHE */
3962
3963 ret = wl_cfgnan_cancel_handler(cmd_data, WL_NAN_CMD_SD_CANCEL_SUBSCRIBE,
3964 &nan_buf->cmds[0], &nan_buf_size);
3965 if (unlikely(ret)) {
3966 WL_ERR(("cancel subscribe failed\n"));
3967 goto fail;
3968 }
3969 nan_buf->is_set = true;
3970 nan_buf->count++;
3971
3972 memset(resp_buf, 0, sizeof(resp_buf));
3973 ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
3974 &(cmd_data->status),
3975 (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
3976 if (unlikely(ret) || unlikely(cmd_data->status)) {
3977 WL_ERR(("nan cancel subscribe failed ret = %d status = %d\n",
3978 ret, cmd_data->status));
3979 goto fail;
3980 }
3981 WL_DBG(("subscribe cancel successfull\n"));
3982 fail:
3983 if (nan_buf) {
3984 MFREE(dhdp->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
3985 }
3986
3987 NAN_MUTEX_UNLOCK();
3988 NAN_DBG_EXIT();
3989 return ret;
3990 }
3991
3992 int
3993 wl_cfgnan_transmit_handler(struct net_device *ndev,
3994 struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
3995 {
3996 s32 ret = BCME_OK;
3997 bcm_iov_batch_buf_t *nan_buf = NULL;
3998 wl_nan_sd_transmit_t *sd_xmit = NULL;
3999 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
4000 bool is_lcl_id = FALSE;
4001 bool is_dest_id = FALSE;
4002 bool is_dest_mac = FALSE;
4003 uint16 buflen_avail;
4004 uint8 *pxtlv;
4005 uint16 nan_buf_size;
4006 uint8 *resp_buf = NULL;
4007 /* Considering fixed params */
4008 uint16 data_size = WL_NAN_OBUF_DATA_OFFSET +
4009 OFFSETOF(wl_nan_sd_transmit_t, opt_tlv);
4010 data_size = ALIGN_SIZE(data_size, 4);
4011 ret = wl_cfgnan_aligned_data_size_of_opt_disc_params(&data_size, cmd_data);
4012 if (unlikely(ret)) {
4013 WL_ERR(("Failed to get alligned size of optional params\n"));
4014 goto fail;
4015 }
4016 NAN_DBG_ENTER();
4017 NAN_MUTEX_LOCK();
4018 nan_buf_size = data_size;
4019 nan_buf = MALLOCZ(cfg->osh, data_size);
4020 if (!nan_buf) {
4021 WL_ERR(("%s: memory allocation failed\n", __func__));
4022 ret = BCME_NOMEM;
4023 goto fail;
4024 }
4025
4026 resp_buf = MALLOCZ(cfg->osh, data_size + NAN_IOVAR_NAME_SIZE);
4027 if (!resp_buf) {
4028 WL_ERR(("%s: memory allocation failed\n", __func__));
4029 ret = BCME_NOMEM;
4030 goto fail;
4031 }
4032
4033 /* nan transmit */
4034 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
4035 nan_buf->count = 0;
4036 nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
4037 /*
4038 * proceed only if mandatory arguments are present - subscriber id,
4039 * publisher id, mac address
4040 */
4041 if ((!cmd_data->local_id) || (!cmd_data->remote_id) ||
4042 ETHER_ISNULLADDR(&cmd_data->mac_addr.octet)) {
4043 WL_ERR(("mandatory arguments are not present\n"));
4044 ret = -EINVAL;
4045 goto fail;
4046 }
4047
4048 sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
4049 sd_xmit = (wl_nan_sd_transmit_t *)(sub_cmd->data);
4050
4051 /* local instance id must be from 0 to 254, 255 is vendor specific */
4052 if (cmd_data->local_id <= NAN_ID_MIN ||
4053 cmd_data->local_id > (NAN_ID_MAX - 1)) {
4054 WL_ERR(("Invalid local instance id\n"));
4055 ret = BCME_BADARG;
4056 goto fail;
4057 }
4058 sd_xmit->local_service_id = cmd_data->local_id;
4059 is_lcl_id = TRUE;
4060
4061 /* remote instance id must be from 0 to 254, 255 is vendor specific */
4062 if (cmd_data->remote_id <= NAN_ID_MIN ||
4063 cmd_data->remote_id > (NAN_ID_MAX - 1)) {
4064 WL_ERR(("Invalid remote instance id\n"));
4065 ret = BCME_BADARG;
4066 goto fail;
4067 }
4068
4069 sd_xmit->requestor_service_id = cmd_data->remote_id;
4070 is_dest_id = TRUE;
4071
4072 if (!ETHER_ISNULLADDR(&cmd_data->mac_addr.octet)) {
4073 memcpy(&sd_xmit->destination_addr, &cmd_data->mac_addr, ETHER_ADDR_LEN);
4074 } else {
4075 WL_ERR(("Invalid ether addr provided\n"));
4076 ret = BCME_BADARG;
4077 goto fail;
4078 }
4079 is_dest_mac = TRUE;
4080
4081 if (cmd_data->priority) {
4082 sd_xmit->priority = cmd_data->priority;
4083 }
4084 sd_xmit->token = cmd_data->token;
4085
4086 if (cmd_data->recv_ind_flag) {
4087 /* BIT0 - If set, host wont rec event "txs" */
4088 if (CHECK_BIT(cmd_data->recv_ind_flag,
4089 WL_NAN_EVENT_SUPPRESS_FOLLOWUP_RECEIVE_BIT)) {
4090 sd_xmit->flags = WL_NAN_FUP_SUPR_EVT_TXS;
4091 }
4092 }
4093 /* Optional parameters: fill the sub_command block with service descriptor attr */
4094 sub_cmd->id = htod16(WL_NAN_CMD_SD_TRANSMIT);
4095 sub_cmd->len = sizeof(sub_cmd->u.options) +
4096 OFFSETOF(wl_nan_sd_transmit_t, opt_tlv);
4097 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
4098 pxtlv = (uint8 *)&sd_xmit->opt_tlv;
4099
4100 nan_buf_size -= (sub_cmd->len +
4101 OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
4102
4103 buflen_avail = nan_buf_size;
4104
4105 if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
4106 bcm_xtlv_t *pxtlv_svc_info = (bcm_xtlv_t *)pxtlv;
4107 ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
4108 WL_NAN_XTLV_SD_SVC_INFO, cmd_data->svc_info.dlen,
4109 cmd_data->svc_info.data, BCM_XTLV_OPTION_ALIGN32);
4110 if (unlikely(ret)) {
4111 WL_ERR(("%s: fail to pack on bcm_pack_xtlv_entry, ret=%d\n",
4112 __FUNCTION__, ret));
4113 goto fail;
4114 }
4115
4116 /* 0xFF is max length for svc_info */
4117 if (pxtlv_svc_info->len > 0xFF) {
4118 WL_ERR(("Invalid service info length %d\n",
4119 (pxtlv_svc_info->len)));
4120 ret = BCME_USAGE_ERROR;
4121 goto fail;
4122 }
4123 sd_xmit->opt_len = (uint8)(pxtlv_svc_info->len);
4124 }
4125 if (cmd_data->sde_svc_info.data && cmd_data->sde_svc_info.dlen) {
4126 WL_TRACE(("optional sdea svc_info present, pack it\n"));
4127 ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
4128 WL_NAN_XTLV_SD_SDE_SVC_INFO, cmd_data->sde_svc_info.dlen,
4129 cmd_data->sde_svc_info.data, BCM_XTLV_OPTION_ALIGN32);
4130 if (unlikely(ret)) {
4131 WL_ERR(("%s: fail to pack sdea svc info\n", __FUNCTION__));
4132 goto fail;
4133 }
4134 }
4135
4136 /* Check if all mandatory params are provided */
4137 if (is_lcl_id && is_dest_id && is_dest_mac) {
4138 nan_buf->count++;
4139 sub_cmd->len += (buflen_avail - nan_buf_size);
4140 } else {
4141 WL_ERR(("Missing parameters\n"));
4142 ret = BCME_USAGE_ERROR;
4143 }
4144 nan_buf->is_set = TRUE;
4145 ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
4146 &(cmd_data->status), resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
4147 if (unlikely(ret) || unlikely(cmd_data->status)) {
4148 WL_ERR(("nan transmit failed for token %d ret = %d status = %d\n",
4149 sd_xmit->token, ret, cmd_data->status));
4150 goto fail;
4151 }
4152 WL_MEM(("nan transmit successful for token %d\n", sd_xmit->token));
4153 fail:
4154 if (nan_buf) {
4155 MFREE(cfg->osh, nan_buf, data_size);
4156 }
4157 if (resp_buf) {
4158 MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
4159 }
4160 NAN_MUTEX_UNLOCK();
4161 NAN_DBG_EXIT();
4162 return ret;
4163 }
4164
4165 int
4166 wl_cfgnan_get_capablities_handler(struct net_device *ndev,
4167 struct bcm_cfg80211 *cfg, nan_hal_capabilities_t *capabilities)
4168 {
4169 s32 ret = BCME_OK;
4170
4171 NAN_DBG_ENTER();
4172
4173 /* Populate get capability */
4174 capabilities->max_concurrent_nan_clusters = MAX_CONCURRENT_NAN_CLUSTERS;
4175 capabilities->max_publishes = MAX_PUBLISHES;
4176 capabilities->max_subscribes = MAX_SUBSCRIBES;
4177 capabilities->max_service_name_len = MAX_SVC_NAME_LEN;
4178 capabilities->max_match_filter_len = MAX_MATCH_FILTER_LEN;
4179 capabilities->max_total_match_filter_len = MAX_TOTAL_MATCH_FILTER_LEN;
4180 capabilities->max_service_specific_info_len = NAN_MAX_SERVICE_SPECIFIC_INFO_LEN;
4181 capabilities->max_ndi_interfaces = MAX_NDI_INTERFACES;
4182 capabilities->max_ndp_sessions = MAX_NDP_SESSIONS;
4183 capabilities->max_app_info_len = MAX_APP_INFO_LEN;
4184 capabilities->max_queued_transmit_followup_msgs = MAX_QUEUED_TX_FOLLOUP_MSGS;
4185 capabilities->max_sdea_service_specific_info_len = MAX_SDEA_SVC_INFO_LEN;
4186 capabilities->max_subscribe_address = MAX_SUBSCRIBE_ADDRESS;
4187 capabilities->cipher_suites_supported = CIPHER_SUITE_SUPPORTED;
4188 capabilities->max_scid_len = MAX_SCID_LEN;
4189 capabilities->is_ndp_security_supported = true;
4190 capabilities->ndp_supported_bands = NDP_SUPPORTED_BANDS;
4191
4192 NAN_DBG_EXIT();
4193 return ret;
4194 }
4195
4196 bool wl_cfgnan_check_state(struct bcm_cfg80211 *cfg)
4197 {
4198 return cfg->nan_enable;
4199 }
4200
4201 int
4202 wl_cfgnan_init(struct bcm_cfg80211 *cfg)
4203 {
4204 s32 ret = BCME_OK;
4205 uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
4206 uint32 status;
4207 uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
4208 uint8 buf[NAN_IOCTL_BUF_SIZE];
4209 bcm_iov_batch_buf_t *nan_buf = (bcm_iov_batch_buf_t*)buf;
4210
4211 NAN_DBG_ENTER();
4212 if (cfg->nan_init_state) {
4213 WL_ERR(("nan initialized/nmi exists\n"));
4214 return BCME_OK;
4215 }
4216 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
4217 nan_buf->count = 0;
4218 nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
4219 ret = wl_cfgnan_init_handler(&nan_buf->cmds[0], &nan_buf_size, true);
4220 if (unlikely(ret)) {
4221 WL_ERR(("init handler sub_cmd set failed\n"));
4222 goto fail;
4223 }
4224 nan_buf->count++;
4225 nan_buf->is_set = true;
4226
4227 memset(resp_buf, 0, sizeof(resp_buf));
4228 ret = wl_cfgnan_execute_ioctl(bcmcfg_to_prmry_ndev(cfg), cfg,
4229 nan_buf, nan_buf_size, &status,
4230 (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
4231 if (unlikely(ret) || unlikely(status)) {
4232 WL_ERR(("nan init handler failed ret %d status %d\n",
4233 ret, status));
4234 goto fail;
4235 }
4236
4237 #ifdef WL_NAN_DISC_CACHE
4238 /* malloc for disc result */
4239 cfg->nan_disc_cache = MALLOCZ(cfg->osh,
4240 NAN_MAX_CACHE_DISC_RESULT * sizeof(nan_disc_result_cache));
4241 if (!cfg->nan_disc_cache) {
4242 WL_ERR(("%s: memory allocation failed\n", __func__));
4243 ret = BCME_NOMEM;
4244 goto fail;
4245 }
4246 #endif /* WL_NAN_DISC_CACHE */
4247 cfg->nan_init_state = true;
4248 return ret;
4249 fail:
4250 NAN_DBG_EXIT();
4251 return ret;
4252 }
4253
int
wl_cfgnan_deinit(struct bcm_cfg80211 *cfg, uint8 busstate)
{
	/* Tear down NAN: tell fw to deinit (only if the bus is still up),
	 * then unconditionally clear host-side NDI bookkeeping and free the
	 * discovery cache. Returns BCME_OK even when fw deinit fails, since
	 * host state is cleaned up regardless.
	 */
	s32 ret = BCME_OK;
	uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
	uint32 status;
	uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
	uint8 buf[NAN_IOCTL_BUF_SIZE];
	bcm_iov_batch_buf_t *nan_buf = (bcm_iov_batch_buf_t*)buf;
	uint8 i = 0;

	NAN_DBG_ENTER();
	NAN_MUTEX_LOCK();

	if (!cfg->nan_init_state) {
		WL_ERR(("nan is not initialized/nmi doesnt exists\n"));
		ret = BCME_OK;
		goto fail;
	}

	/* Only talk to fw when the bus is alive; otherwise skip straight to
	 * host-side cleanup.
	 */
	if (busstate != DHD_BUS_DOWN) {
		nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
		nan_buf->count = 0;
		nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);

		WL_DBG(("nan deinit\n"));
		ret = wl_cfgnan_init_handler(&nan_buf->cmds[0], &nan_buf_size, false);
		if (unlikely(ret)) {
			WL_ERR(("deinit handler sub_cmd set failed\n"));
		} else {
			nan_buf->count++;
			nan_buf->is_set = true;
			memset(resp_buf, 0, sizeof(resp_buf));
			/* fw failure here is logged but not fatal - host
			 * cleanup below must proceed anyway.
			 */
			ret = wl_cfgnan_execute_ioctl(cfg->wdev->netdev, cfg,
				nan_buf, nan_buf_size, &status,
				(void*)resp_buf, NAN_IOCTL_BUF_SIZE);
			if (unlikely(ret) || unlikely(status)) {
				WL_ERR(("nan init handler failed ret %d status %d\n",
					ret, status));
			}
		}
	}

	for (i = 0; i < NAN_MAX_NDI; i++) {
		/* clean NDI data */
		cfg->nancfg.ndi[i].in_use = false;
		cfg->nancfg.ndi[i].created = false;
		memset(&cfg->nancfg.ndi[i].ifname, 0x0, IFNAMSIZ);
	}

	cfg->nan_dp_mask = 0;
	cfg->nan_init_state = false;
#ifdef WL_NAN_DISC_CACHE
	/* Free per-entry heap blobs before releasing the cache array itself */
	if (cfg->nan_disc_cache) {
		for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
			if (cfg->nan_disc_cache[i].tx_match_filter.data) {
				MFREE(cfg->osh, cfg->nan_disc_cache[i].tx_match_filter.data,
					cfg->nan_disc_cache[i].tx_match_filter.dlen);
			}
			if (cfg->nan_disc_cache[i].svc_info.data) {
				MFREE(cfg->osh, cfg->nan_disc_cache[i].svc_info.data,
					cfg->nan_disc_cache[i].svc_info.dlen);
			}
		}
		MFREE(cfg->osh, cfg->nan_disc_cache,
			NAN_MAX_CACHE_DISC_RESULT * sizeof(nan_disc_result_cache));
		cfg->nan_disc_cache = NULL;
	}
	cfg->nan_disc_count = 0;
	memset(cfg->svc_info, 0, NAN_MAX_SVC_INST * sizeof(nan_svc_info_t));
	memset(cfg->nan_ranging_info, 0, NAN_MAX_RANGING_INST * sizeof(nan_ranging_inst_t));
#endif /* WL_NAN_DISC_CACHE */
fail:
	/* Release the NMI mac back to the vif pool unless it was randomized */
	if (!cfg->nancfg.mac_rand) {
		wl_release_vif_macaddr(cfg, cfg->nan_nmi_mac, WL_IF_TYPE_NAN_NMI);
	}
	NAN_MUTEX_UNLOCK();
	NAN_DBG_EXIT();
	return ret;
}
4334
4335 static int
4336 wl_cfgnan_get_ndi_macaddr(struct bcm_cfg80211 *cfg, u8* mac_addr)
4337 {
4338 int i = 0;
4339 int ret = BCME_OK;
4340 bool rand_mac = cfg->nancfg.mac_rand;
4341 BCM_REFERENCE(i);
4342
4343 if (rand_mac) {
4344 /* ensure nmi != ndi */
4345 do {
4346 RANDOM_BYTES(mac_addr, ETHER_ADDR_LEN);
4347 /* restore mcast and local admin bits to 0 and 1 */
4348 ETHER_SET_UNICAST(mac_addr);
4349 ETHER_SET_LOCALADDR(mac_addr);
4350 i++;
4351 if (i == NAN_RAND_MAC_RETRIES) {
4352 break;
4353 }
4354 } while (eacmp(cfg->nan_nmi_mac, mac_addr) == 0);
4355
4356 if (i == NAN_RAND_MAC_RETRIES) {
4357 if (eacmp(cfg->nan_nmi_mac, mac_addr) == 0) {
4358 WL_ERR(("\nCouldn't generate rand NDI which != NMI\n"));
4359 ret = BCME_NORESOURCE;
4360 goto fail;
4361 }
4362 }
4363 } else {
4364 if (wl_get_vif_macaddr(cfg, WL_IF_TYPE_NAN,
4365 mac_addr) != BCME_OK) {
4366 ret = -EINVAL;
4367 goto fail;
4368 }
4369 }
4370
4371 fail:
4372 return ret;
4373 }
4374
4375 int
4376 wl_cfgnan_data_path_iface_create_delete_handler(struct net_device *ndev,
4377 struct bcm_cfg80211 *cfg, char *ifname, uint16 type, uint8 busstate)
4378 {
4379 u8 mac_addr[ETH_ALEN];
4380 s32 ret = BCME_OK;
4381 NAN_DBG_ENTER();
4382
4383 if (busstate != DHD_BUS_DOWN) {
4384 if (type == NAN_WIFI_SUBCMD_DATA_PATH_IFACE_CREATE) {
4385 ret = wl_cfgnan_get_ndi_macaddr(cfg, mac_addr);
4386 if (ret != BCME_OK) {
4387 WL_ERR(("Couldn't get mac addr for NDI ret %d\n", ret));
4388 goto fail;
4389 }
4390 if (wl_cfg80211_add_if(cfg, ndev, WL_IF_TYPE_NAN,
4391 ifname, mac_addr) == NULL) {
4392 ret = -ENODEV;
4393 goto fail;
4394 }
4395 } else if (type == NAN_WIFI_SUBCMD_DATA_PATH_IFACE_DELETE) {
4396 ret = wl_cfg80211_del_if(cfg, ndev, NULL, ifname);
4397 }
4398 } else {
4399 ret = -ENODEV;
4400 WL_ERR(("Bus is already down, no dev found to remove, ret = %d\n", ret));
4401 }
4402 fail:
4403 NAN_DBG_EXIT();
4404 return ret;
4405 }
4406
4407 int
4408 wl_cfgnan_data_path_request_handler(struct net_device *ndev,
4409 struct bcm_cfg80211 *cfg, nan_datapath_cmd_data_t *cmd_data,
4410 uint8 *ndp_instance_id)
4411 {
4412 s32 ret = BCME_OK;
4413 bcm_iov_batch_buf_t *nan_buf = NULL;
4414 wl_nan_dp_req_t *datareq = NULL;
4415 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
4416 uint16 buflen_avail;
4417 uint8 *pxtlv;
4418 struct wireless_dev *wdev;
4419
4420 uint16 nan_buf_size;
4421 uint8 *resp_buf = NULL;
4422 /* Considering fixed params */
4423 uint16 data_size = WL_NAN_OBUF_DATA_OFFSET +
4424 OFFSETOF(wl_nan_dp_req_t, tlv_params);
4425 data_size = ALIGN_SIZE(data_size, 4);
4426
4427 ret = wl_cfgnan_aligned_data_size_of_opt_dp_params(&data_size, cmd_data);
4428 if (unlikely(ret)) {
4429 WL_ERR(("Failed to get alligned size of optional params\n"));
4430 goto fail;
4431 }
4432
4433 nan_buf_size = data_size;
4434 NAN_DBG_ENTER();
4435 NAN_MUTEX_LOCK();
4436
4437 nan_buf = MALLOCZ(cfg->osh, data_size);
4438 if (!nan_buf) {
4439 WL_ERR(("%s: memory allocation failed\n", __func__));
4440 ret = BCME_NOMEM;
4441 goto fail;
4442 }
4443
4444 resp_buf = MALLOCZ(cfg->osh, data_size + NAN_IOVAR_NAME_SIZE);
4445 if (!resp_buf) {
4446 WL_ERR(("%s: memory allocation failed\n", __func__));
4447 ret = BCME_NOMEM;
4448 goto fail;
4449 }
4450
4451 ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
4452 cfg, &cmd_data->avail_params, WL_AVAIL_LOCAL);
4453 if (unlikely(ret)) {
4454 WL_ERR(("Failed to set avail value with type local\n"));
4455 goto fail;
4456 }
4457
4458 ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
4459 cfg, &cmd_data->avail_params, WL_AVAIL_NDC);
4460 if (unlikely(ret)) {
4461 WL_ERR(("Failed to set avail value with type ndc\n"));
4462 goto fail;
4463 }
4464
4465 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
4466 nan_buf->count = 0;
4467 nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
4468
4469 sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
4470 datareq = (wl_nan_dp_req_t *)(sub_cmd->data);
4471
4472 /* setting default data path type to unicast */
4473 datareq->type = WL_NAN_DP_TYPE_UNICAST;
4474
4475 if (cmd_data->pub_id) {
4476 datareq->pub_id = cmd_data->pub_id;
4477 }
4478
4479 if (!ETHER_ISNULLADDR(&cmd_data->mac_addr.octet)) {
4480 memcpy(&datareq->peer_mac, &cmd_data->mac_addr, ETHER_ADDR_LEN);
4481 } else {
4482 WL_ERR(("Invalid ether addr provided\n"));
4483 ret = BCME_BADARG;
4484 goto fail;
4485 }
4486
4487 /* Retrieve mac from given iface name */
4488 wdev = wl_cfg80211_get_wdev_from_ifname(cfg,
4489 (char *)cmd_data->ndp_iface);
4490 if (!wdev || ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
4491 ret = -EINVAL;
4492 goto fail;
4493 }
4494
4495 if (!ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
4496 memcpy(&datareq->ndi, wdev->netdev->dev_addr, ETHER_ADDR_LEN);
4497 WL_TRACE(("%s: Retrieved ndi mac " MACDBG "\n",
4498 __FUNCTION__, MAC2STRDBG(datareq->ndi.octet)));
4499 } else {
4500 WL_ERR(("Invalid NDI addr retrieved\n"));
4501 ret = BCME_BADARG;
4502 goto fail;
4503 }
4504
4505 datareq->ndl_qos.min_slots = NAN_NDL_QOS_MIN_SLOT_NO_PREF;
4506 datareq->ndl_qos.max_latency = NAN_NDL_QOS_MAX_LAT_NO_PREF;
4507
4508 /* Fill the sub_command block */
4509 sub_cmd->id = htod16(WL_NAN_CMD_DATA_DATAREQ);
4510 sub_cmd->len = sizeof(sub_cmd->u.options) +
4511 OFFSETOF(wl_nan_dp_req_t, tlv_params);
4512 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
4513 pxtlv = (uint8 *)&datareq->tlv_params;
4514
4515 nan_buf_size -= (sub_cmd->len +
4516 OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
4517 buflen_avail = nan_buf_size;
4518
4519 if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
4520 ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
4521 WL_NAN_XTLV_SD_SVC_INFO, cmd_data->svc_info.dlen,
4522 cmd_data->svc_info.data,
4523 BCM_XTLV_OPTION_ALIGN32);
4524 if (ret != BCME_OK) {
4525 WL_ERR(("unable to process svc_spec_info: %d\n", ret));
4526 goto fail;
4527 }
4528 datareq->flags |= WL_NAN_DP_FLAG_SVC_INFO;
4529 }
4530
4531 /* Security elements */
4532
4533 if (cmd_data->csid) {
4534 WL_TRACE(("Cipher suite type is present, pack it\n"));
4535 ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
4536 WL_NAN_XTLV_CFG_SEC_CSID, sizeof(nan_sec_csid_e),
4537 (uint8*)&cmd_data->csid, BCM_XTLV_OPTION_ALIGN32);
4538 if (unlikely(ret)) {
4539 WL_ERR(("%s: fail to pack on csid\n", __FUNCTION__));
4540 goto fail;
4541 }
4542 }
4543
4544 if (cmd_data->ndp_cfg.security_cfg) {
4545 if ((cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PMK) ||
4546 (cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PASSPHRASE)) {
4547 if (cmd_data->key.data && cmd_data->key.dlen) {
4548 WL_TRACE(("optional pmk present, pack it\n"));
4549 ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
4550 WL_NAN_XTLV_CFG_SEC_PMK, cmd_data->key.dlen,
4551 cmd_data->key.data, BCM_XTLV_OPTION_ALIGN32);
4552 if (unlikely(ret)) {
4553 WL_ERR(("%s: fail to pack on WL_NAN_XTLV_CFG_SEC_PMK\n",
4554 __FUNCTION__));
4555 goto fail;
4556 }
4557 }
4558 } else {
4559 WL_ERR(("Invalid security key type\n"));
4560 ret = BCME_BADARG;
4561 goto fail;
4562 }
4563
4564 if ((cmd_data->svc_hash.dlen == WL_NAN_SVC_HASH_LEN) &&
4565 (cmd_data->svc_hash.data)) {
4566 WL_TRACE(("svc hash present, pack it\n"));
4567 ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
4568 WL_NAN_XTLV_CFG_SVC_HASH, WL_NAN_SVC_HASH_LEN,
4569 cmd_data->svc_hash.data, BCM_XTLV_OPTION_ALIGN32);
4570 if (ret != BCME_OK) {
4571 WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SVC_HASH\n",
4572 __FUNCTION__));
4573 goto fail;
4574 }
4575 } else {
4576 #ifdef WL_NAN_DISC_CACHE
4577 /* check in cache */
4578 nan_disc_result_cache *cache;
4579 cache = wl_cfgnan_get_disc_result(cfg,
4580 datareq->pub_id, &datareq->peer_mac);
4581 if (!cache) {
4582 ret = BCME_ERROR;
4583 WL_ERR(("invalid svc hash data or length = %d\n",
4584 cmd_data->svc_hash.dlen));
4585 goto fail;
4586 }
4587 WL_TRACE(("svc hash present, pack it\n"));
4588 ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
4589 WL_NAN_XTLV_CFG_SVC_HASH, WL_NAN_SVC_HASH_LEN,
4590 cache->svc_hash, BCM_XTLV_OPTION_ALIGN32);
4591 if (ret != BCME_OK) {
4592 WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SVC_HASH\n",
4593 __FUNCTION__));
4594 goto fail;
4595 }
4596 #else
4597 ret = BCME_ERROR;
4598 WL_ERR(("invalid svc hash data or length = %d\n",
4599 cmd_data->svc_hash.dlen));
4600 goto fail;
4601 #endif /* WL_NAN_DISC_CACHE */
4602 }
4603 /* If the Data req is for secure data connection */
4604 datareq->flags |= WL_NAN_DP_FLAG_SECURITY;
4605 }
4606
4607 sub_cmd->len += (buflen_avail - nan_buf_size);
4608 nan_buf->is_set = false;
4609 nan_buf->count++;
4610
4611 ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
4612 &(cmd_data->status), resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
4613 if (unlikely(ret) || unlikely(cmd_data->status)) {
4614 WL_ERR(("nan data path request handler failed, ret = %d status %d\n",
4615 ret, cmd_data->status));
4616 goto fail;
4617 }
4618
4619 /* check the response buff */
4620 if (ret == BCME_OK) {
4621 ret = process_resp_buf(resp_buf + WL_NAN_OBUF_DATA_OFFSET,
4622 ndp_instance_id, WL_NAN_CMD_DATA_DATAREQ);
4623 cmd_data->ndp_instance_id = *ndp_instance_id;
4624 }
4625 WL_INFORM_MEM(("[NAN] DP request successfull (ndp_id:%d)\n",
4626 cmd_data->ndp_instance_id));
4627
4628 fail:
4629 if (nan_buf) {
4630 MFREE(cfg->osh, nan_buf, data_size);
4631 }
4632
4633 if (resp_buf) {
4634 MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
4635 }
4636
4637 NAN_MUTEX_UNLOCK();
4638 NAN_DBG_EXIT();
4639 return ret;
4640 }
4641
4642 int
4643 wl_cfgnan_data_path_response_handler(struct net_device *ndev,
4644 struct bcm_cfg80211 *cfg, nan_datapath_cmd_data_t *cmd_data)
4645 {
4646 s32 ret = BCME_OK;
4647 bcm_iov_batch_buf_t *nan_buf = NULL;
4648 wl_nan_dp_resp_t *dataresp = NULL;
4649 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
4650 uint16 buflen_avail;
4651 uint8 *pxtlv;
4652 struct wireless_dev *wdev;
4653 uint16 nan_buf_size;
4654 uint8 *resp_buf = NULL;
4655
4656 /* Considering fixed params */
4657 uint16 data_size = WL_NAN_OBUF_DATA_OFFSET +
4658 OFFSETOF(wl_nan_dp_resp_t, tlv_params);
4659 data_size = ALIGN_SIZE(data_size, 4);
4660 ret = wl_cfgnan_aligned_data_size_of_opt_dp_params(&data_size, cmd_data);
4661 if (unlikely(ret)) {
4662 WL_ERR(("Failed to get alligned size of optional params\n"));
4663 goto fail;
4664 }
4665 nan_buf_size = data_size;
4666
4667 NAN_DBG_ENTER();
4668 NAN_MUTEX_LOCK();
4669
4670 nan_buf = MALLOCZ(cfg->osh, data_size);
4671 if (!nan_buf) {
4672 WL_ERR(("%s: memory allocation failed\n", __func__));
4673 ret = BCME_NOMEM;
4674 goto fail;
4675 }
4676
4677 resp_buf = MALLOCZ(cfg->osh, data_size + NAN_IOVAR_NAME_SIZE);
4678 if (!resp_buf) {
4679 WL_ERR(("%s: memory allocation failed\n", __func__));
4680 ret = BCME_NOMEM;
4681 goto fail;
4682 }
4683
4684 ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
4685 cfg, &cmd_data->avail_params, WL_AVAIL_LOCAL);
4686 if (unlikely(ret)) {
4687 WL_ERR(("Failed to set avail value with type local\n"));
4688 goto fail;
4689 }
4690
4691 ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
4692 cfg, &cmd_data->avail_params, WL_AVAIL_NDC);
4693 if (unlikely(ret)) {
4694 WL_ERR(("Failed to set avail value with type ndc\n"));
4695 goto fail;
4696 }
4697
4698 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
4699 nan_buf->count = 0;
4700 nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
4701
4702 sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
4703 dataresp = (wl_nan_dp_resp_t *)(sub_cmd->data);
4704
4705 /* Setting default data path type to unicast */
4706 dataresp->type = WL_NAN_DP_TYPE_UNICAST;
4707 /* Changing status value as per fw convention */
4708 dataresp->status = cmd_data->rsp_code ^= 1;
4709 dataresp->reason_code = 0;
4710
4711 /* ndp instance id must be from 0 to 255 */
4712 if (cmd_data->ndp_instance_id <= NAN_ID_MIN ||
4713 cmd_data->ndp_instance_id > NAN_ID_MAX) {
4714 WL_ERR(("Invalid ndp instance id\n"));
4715 ret = BCME_BADARG;
4716 goto fail;
4717 }
4718 dataresp->ndp_id = cmd_data->ndp_instance_id;
4719
4720 /* Retrieved initiator ndi from NanDataPathRequestInd */
4721 if (!ETHER_ISNULLADDR(&cfg->initiator_ndi.octet)) {
4722 memcpy(&dataresp->mac_addr, &cfg->initiator_ndi, ETHER_ADDR_LEN);
4723 } else {
4724 WL_ERR(("Invalid ether addr retrieved\n"));
4725 ret = BCME_BADARG;
4726 goto fail;
4727 }
4728
4729 /* Retrieve mac from given iface name */
4730 wdev = wl_cfg80211_get_wdev_from_ifname(cfg,
4731 (char *)cmd_data->ndp_iface);
4732 if (!wdev || ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
4733 ret = -EINVAL;
4734 goto fail;
4735 }
4736
4737 if (!ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
4738 memcpy(&dataresp->ndi, wdev->netdev->dev_addr, ETHER_ADDR_LEN);
4739 WL_TRACE(("%s: Retrieved ndi mac " MACDBG "\n",
4740 __FUNCTION__, MAC2STRDBG(dataresp->ndi.octet)));
4741 } else {
4742 WL_ERR(("Invalid NDI addr retrieved\n"));
4743 ret = BCME_BADARG;
4744 goto fail;
4745 }
4746
4747 dataresp->ndl_qos.min_slots = NAN_NDL_QOS_MIN_SLOT_NO_PREF;
4748 dataresp->ndl_qos.max_latency = NAN_NDL_QOS_MAX_LAT_NO_PREF;
4749
4750 /* Fill the sub_command block */
4751 sub_cmd->id = htod16(WL_NAN_CMD_DATA_DATARESP);
4752 sub_cmd->len = sizeof(sub_cmd->u.options) +
4753 OFFSETOF(wl_nan_dp_resp_t, tlv_params);
4754 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
4755 pxtlv = (uint8 *)&dataresp->tlv_params;
4756
4757 nan_buf_size -= (sub_cmd->len +
4758 OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
4759 buflen_avail = nan_buf_size;
4760
4761 if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
4762 ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
4763 WL_NAN_XTLV_SD_SVC_INFO, cmd_data->svc_info.dlen,
4764 cmd_data->svc_info.data,
4765 BCM_XTLV_OPTION_ALIGN32);
4766 if (ret != BCME_OK) {
4767 WL_ERR(("unable to process svc_spec_info: %d\n", ret));
4768 goto fail;
4769 }
4770 dataresp->flags |= WL_NAN_DP_FLAG_SVC_INFO;
4771 }
4772
4773 /* Security elements */
4774 if (cmd_data->csid) {
4775 WL_TRACE(("Cipher suite type is present, pack it\n"));
4776 ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
4777 WL_NAN_XTLV_CFG_SEC_CSID, sizeof(nan_sec_csid_e),
4778 (uint8*)&cmd_data->csid, BCM_XTLV_OPTION_ALIGN32);
4779 if (unlikely(ret)) {
4780 WL_ERR(("%s: fail to pack csid\n", __FUNCTION__));
4781 goto fail;
4782 }
4783 }
4784
4785 if (cmd_data->ndp_cfg.security_cfg) {
4786 if ((cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PMK) ||
4787 (cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PASSPHRASE)) {
4788 if (cmd_data->key.data && cmd_data->key.dlen) {
4789 WL_TRACE(("optional pmk present, pack it\n"));
4790 ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
4791 WL_NAN_XTLV_CFG_SEC_PMK, cmd_data->key.dlen,
4792 cmd_data->key.data, BCM_XTLV_OPTION_ALIGN32);
4793 if (unlikely(ret)) {
4794 WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SEC_PMK\n",
4795 __FUNCTION__));
4796 goto fail;
4797 }
4798 }
4799 } else {
4800 WL_ERR(("Invalid security key type\n"));
4801 ret = BCME_BADARG;
4802 goto fail;
4803 }
4804
4805 if ((cmd_data->svc_hash.dlen == WL_NAN_SVC_HASH_LEN) &&
4806 (cmd_data->svc_hash.data)) {
4807 WL_TRACE(("svc hash present, pack it\n"));
4808 ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
4809 WL_NAN_XTLV_CFG_SVC_HASH, WL_NAN_SVC_HASH_LEN,
4810 cmd_data->svc_hash.data,
4811 BCM_XTLV_OPTION_ALIGN32);
4812 if (ret != BCME_OK) {
4813 WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SVC_HASH\n",
4814 __FUNCTION__));
4815 goto fail;
4816 }
4817 }
4818 /* If the Data resp is for secure data connection */
4819 dataresp->flags |= WL_NAN_DP_FLAG_SECURITY;
4820 }
4821
4822 sub_cmd->len += (buflen_avail - nan_buf_size);
4823
4824 nan_buf->is_set = false;
4825 nan_buf->count++;
4826 ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
4827 &(cmd_data->status), resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
4828 if (unlikely(ret) || unlikely(cmd_data->status)) {
4829 WL_ERR(("nan data path response handler failed, error = %d, status %d\n",
4830 ret, cmd_data->status));
4831 goto fail;
4832 }
4833
4834 WL_INFORM_MEM(("[NAN] DP response successfull (ndp_id:%d)\n", dataresp->ndp_id));
4835
4836 fail:
4837 if (nan_buf) {
4838 MFREE(cfg->osh, nan_buf, data_size);
4839 }
4840
4841 if (resp_buf) {
4842 MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
4843 }
4844
4845 NAN_MUTEX_UNLOCK();
4846 NAN_DBG_EXIT();
4847 return ret;
4848 }
4849
4850 int wl_cfgnan_data_path_end_handler(struct net_device *ndev,
4851 struct bcm_cfg80211 *cfg, nan_datapath_cmd_data_t *cmd_data)
4852 {
4853 bcm_iov_batch_buf_t *nan_buf = NULL;
4854 wl_nan_dp_end_t *dataend = NULL;
4855 bcm_iov_batch_subcmd_t *sub_cmd = NULL;
4856 s32 ret = BCME_OK;
4857 uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
4858 uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
4859
4860 dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
4861
4862 NAN_DBG_ENTER();
4863 NAN_MUTEX_LOCK();
4864
4865 if (!dhdp->up) {
4866 WL_ERR(("bus is already down, hence blocking nan dp end\n"));
4867 ret = BCME_OK;
4868 goto fail;
4869 }
4870
4871 if (!cfg->nan_enable) {
4872 WL_ERR(("nan is not enabled, nan dp end blocked\n"));
4873 ret = BCME_OK;
4874 goto fail;
4875 }
4876
4877 /* ndp instance id must be from 0 to 255 */
4878 if (cmd_data->ndp_instance_id <= NAN_ID_MIN ||
4879 cmd_data->ndp_instance_id > NAN_ID_MAX) {
4880 WL_ERR(("Invalid ndp instance id\n"));
4881 ret = BCME_BADARG;
4882 goto fail;
4883 }
4884
4885 nan_buf = MALLOCZ(dhdp->osh, nan_buf_size);
4886 if (!nan_buf) {
4887 WL_ERR(("%s: memory allocation failed\n", __func__));
4888 ret = BCME_NOMEM;
4889 goto fail;
4890 }
4891
4892 nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
4893 nan_buf->count = 0;
4894 nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
4895
4896 sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
4897 dataend = (wl_nan_dp_end_t *)(sub_cmd->data);
4898
4899 /* Fill sub_cmd block */
4900 sub_cmd->id = htod16(WL_NAN_CMD_DATA_DATAEND);
4901 sub_cmd->len = sizeof(sub_cmd->u.options) +
4902 sizeof(*dataend);
4903 sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
4904
4905 dataend->lndp_id = cmd_data->ndp_instance_id;
4906
4907 /*
4908 * Currently fw requires ndp_id and reason to end the data path
4909 * But wifi_nan.h takes ndp_instances_count and ndp_id.
4910 * Will keep reason = accept always.
4911 */
4912
4913 dataend->status = 1;
4914
4915 nan_buf->is_set = true;
4916 nan_buf->count++;
4917
4918 nan_buf_size -= (sub_cmd->len +
4919 OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
4920 memset(resp_buf, 0, sizeof(resp_buf));
4921 ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
4922 &(cmd_data->status),
4923 (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
4924 if (unlikely(ret) || unlikely(cmd_data->status)) {
4925 WL_ERR(("nan data path end handler failed, error = %d status %d\n",
4926 ret, cmd_data->status));
4927 goto fail;
4928 }
4929 WL_INFORM_MEM(("[NAN] DP end successfull (ndp_id:%d)\n",
4930 dataend->lndp_id));
4931
4932 fail:
4933 if (nan_buf) {
4934 MFREE(dhdp->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
4935 }
4936
4937 NAN_MUTEX_UNLOCK();
4938 NAN_DBG_EXIT();
4939 return ret;
4940 }
4941
4942 #ifdef WL_NAN_DISC_CACHE
int wl_cfgnan_sec_info_handler(struct bcm_cfg80211 *cfg,
	nan_datapath_sec_info_cmd_data_t *cmd_data, nan_hal_resp_t *nan_req_resp)
{
	/* Resolve security info (svc hash / publisher NMI) for a pending
	 * data path. Handles two distinct contexts in one call:
	 *   - DP request  (keyed by pub_id + peer mac, looked up in disc cache)
	 *   - DP response (keyed by ndp_instance_id, looked up in svc table)
	 * NOTE: when both contexts are present, the response context runs
	 * last and its BCME_OK overrides any earlier request-context error.
	 */
	s32 ret = BCME_NOTFOUND;
	/* check in cache */
	nan_disc_result_cache *disc_cache = NULL;
	nan_svc_info_t *svc_info = NULL;

	NAN_DBG_ENTER();
	NAN_MUTEX_LOCK();

	if (!cfg->nan_init_state) {
		WL_ERR(("nan is not initialized/nmi doesnt exists\n"));
		ret = BCME_NOTENABLED;
		goto fail;
	}

	/* datapath request context */
	if (cmd_data->pub_id && !ETHER_ISNULLADDR(&cmd_data->mac_addr)) {
		disc_cache = wl_cfgnan_get_disc_result(cfg,
			cmd_data->pub_id, &cmd_data->mac_addr);
		if (disc_cache) {
			WL_TRACE(("svc hash present, pack it\n"));
			memcpy(nan_req_resp->svc_hash, disc_cache->svc_hash, WL_NAN_SVC_HASH_LEN);
			ret = BCME_OK;
		}
	} else {
		WL_ERR(("Missing mandatory info..pub id %d & pub_mac "MACDBG"\n",
			cmd_data->pub_id, MAC2STRDBG(cmd_data->mac_addr.octet)));
		ret = BCME_BADARG;
	}

	/* datapath response context */
	if (cmd_data->ndp_instance_id) {
		svc_info = wl_cfgnan_get_svc_inst(cfg, 0, cmd_data->ndp_instance_id);
		/* Note: svc_info will not be present in OOB cases
		 * In such case send NMI alone and let HAL handle if
		 * svc_hash is mandatory
		 */
		if (svc_info) {
			WL_TRACE(("svc hash present, pack it\n"));
			memcpy(nan_req_resp->svc_hash, svc_info->svc_hash, WL_NAN_SVC_HASH_LEN);
		} else {
			WL_MEM(("svc_info not present..assuming OOB DP\n"));
		}
		/* Always send NMI */
		memcpy(nan_req_resp->pub_nmi, cfg->nan_nmi_mac, ETHER_ADDR_LEN);
		ret = BCME_OK;
	} else {
		WL_ERR(("Invalid ndp id\n"));
	}
fail:
	NAN_MUTEX_UNLOCK();
	NAN_DBG_EXIT();
	return ret;
}
4999
/*
 * Copy a cached discovery result into a nan_event_data_t so it can be sent
 * up to HAL (used when a ranging report must be delivered as a subscribe
 * match event).
 *
 * Deep-copies the optional svc_info and tx_match_filter blobs from 'osh'.
 * On allocation failure returns -ENOMEM with the corresponding dlen reset
 * to 0; any blob allocated before the failure is NOT freed here — the
 * caller's exit path (wl_cfgnan_notify_nan_status) frees all populated
 * nan_event_data buffers.
 */
static s32 wl_nan_cache_to_event_data(nan_disc_result_cache *cache,
	nan_event_data_t *nan_event_data, osl_t *osh)
{
	s32 ret = BCME_OK;
	NAN_DBG_ENTER();

	/* Plain scalar fields and the peer NMI copy straight across */
	nan_event_data->pub_id = cache->pub_id;
	nan_event_data->sub_id = cache->sub_id;
	nan_event_data->publish_rssi = cache->publish_rssi;
	nan_event_data->peer_cipher_suite = cache->peer_cipher_suite;
	memcpy(&nan_event_data->remote_nmi, &cache->peer, ETHER_ADDR_LEN);

	/* Optional service info blob */
	if (cache->svc_info.dlen && cache->svc_info.data) {
		nan_event_data->svc_info.dlen = cache->svc_info.dlen;
		nan_event_data->svc_info.data =
			MALLOCZ(osh, nan_event_data->svc_info.dlen);
		if (!nan_event_data->svc_info.data) {
			WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
			/* keep dlen consistent with the NULL data pointer */
			nan_event_data->svc_info.dlen = 0;
			ret = -ENOMEM;
			goto fail;
		}
		memcpy(nan_event_data->svc_info.data,
			cache->svc_info.data, cache->svc_info.dlen);
	}
	/* Optional tx match filter blob */
	if (cache->tx_match_filter.dlen && cache->tx_match_filter.data) {
		nan_event_data->tx_match_filter.dlen = cache->tx_match_filter.dlen;
		nan_event_data->tx_match_filter.data =
			MALLOCZ(osh, nan_event_data->tx_match_filter.dlen);
		if (!nan_event_data->tx_match_filter.data) {
			WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
			nan_event_data->tx_match_filter.dlen = 0;
			ret = -ENOMEM;
			goto fail;
		}
		memcpy(nan_event_data->tx_match_filter.data,
			cache->tx_match_filter.data, cache->tx_match_filter.dlen);
	}
fail:
	NAN_DBG_EXIT();
	return ret;
}
5042 #endif /* WL_NAN_DISC_CACHE */
/*
 * Parse the common datapath event payload (WL_NAN_XTLV_DATA_DP_INFO) shared
 * by WL_NAN_EVENT_PEER_DATAPATH_IND / DATAPATH_ESTB / DATAPATH_END and map
 * it into the DHD<->HAL nan_event_data_t.
 *
 * On success, *tlvs_offset / *nan_opts_len describe where the optional TLVs
 * start inside 'event_data' and *hal_event_id is set to the HAL event to
 * emit. XTLV ids other than DP_INFO are logged and ignored (returns BCME_OK).
 */
static s32
wl_nan_dp_cmn_event_data(struct bcm_cfg80211 *cfg, void *event_data,
	uint16 data_len, uint16 *tlvs_offset,
	uint16 *nan_opts_len, uint32 event_num,
	int *hal_event_id, nan_event_data_t *nan_event_data)
{
	s32 ret = BCME_OK;
	uint8 i;
	wl_nan_ev_datapath_cmn_t *ev_dp;
	nan_svc_info_t *svc_info;
	bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
	if (xtlv->id == WL_NAN_XTLV_DATA_DP_INFO) {
		ev_dp = (wl_nan_ev_datapath_cmn_t *)xtlv->data;
		NAN_DBG_ENTER();

		/* silence unused-variable warnings when WL_NAN_DISC_CACHE is off */
		BCM_REFERENCE(svc_info);
		BCM_REFERENCE(i);
		/* Mapping to common struct between DHD and HAL */
		WL_TRACE(("Event type: %d\n", ev_dp->type));
		nan_event_data->type = ev_dp->type;
		WL_TRACE(("pub_id: %d\n", ev_dp->pub_id));
		nan_event_data->pub_id = ev_dp->pub_id;
		WL_TRACE(("security: %d\n", ev_dp->security));
		nan_event_data->security = ev_dp->security;

		/* Store initiator_ndi, required for data_path_response_request */
		memcpy(&cfg->initiator_ndi, &ev_dp->initiator_ndi,
			ETHER_ADDR_LEN);
		if (ev_dp->type == NAN_DP_SESSION_UNICAST) {
			WL_INFORM_MEM(("NDP ID: %d\n", ev_dp->ndp_id));
			nan_event_data->ndp_id = ev_dp->ndp_id;
			WL_TRACE(("INITIATOR_NDI: " MACDBG "\n",
				MAC2STRDBG(ev_dp->initiator_ndi.octet)));
			WL_TRACE(("RESPONDOR_NDI: " MACDBG "\n",
				MAC2STRDBG(ev_dp->responder_ndi.octet)));
			WL_TRACE(("PEER NMI: " MACDBG "\n",
				MAC2STRDBG(ev_dp->peer_nmi.octet)));
			memcpy(&nan_event_data->remote_nmi, &ev_dp->peer_nmi,
				ETHER_ADDR_LEN);
		} else {
			/* type is multicast: the NDP id lives in mc_id instead */
			WL_INFORM_MEM(("NDP ID: %d\n", ev_dp->mc_id));
			nan_event_data->ndp_id = ev_dp->mc_id;
			WL_TRACE(("PEER NMI: " MACDBG "\n",
				MAC2STRDBG(ev_dp->peer_nmi.octet)));
			memcpy(&nan_event_data->remote_nmi, &ev_dp->peer_nmi,
				ETHER_ADDR_LEN);
		}
		/* Optional TLVs start after the fixed cmn struct inside the xtlv */
		*tlvs_offset = OFFSETOF(wl_nan_ev_datapath_cmn_t, opt_tlvs) +
			OFFSETOF(bcm_xtlv_t, data);
		*nan_opts_len = data_len - *tlvs_offset;
		if (event_num == WL_NAN_EVENT_PEER_DATAPATH_IND) {
			*hal_event_id = GOOGLE_NAN_EVENT_DATA_REQUEST;
#ifdef WL_NAN_DISC_CACHE
			/* Responder side: record the incoming ndp id in the first
			 * free slot of the publishing service's ndp_id table.
			 */
			svc_info = wl_cfgnan_get_svc_inst(cfg, nan_event_data->pub_id, 0);
			if (svc_info) {
				for (i = 0; i < NAN_MAX_SVC_INST; i++) {
					if (!svc_info->ndp_id[i]) {
						WL_TRACE(("Found empty field\n"));
						break;
					}
				}
				if (i == NAN_MAX_SVC_INST) {
					WL_ERR(("%s:cannot accommadate ndp id\n", __FUNCTION__));
					ret = BCME_NORESOURCE;
					goto fail;
				}
				svc_info->ndp_id[i] = nan_event_data->ndp_id;
				ret = BCME_OK;
			}
#endif /* WL_NAN_DISC_CACHE */
		} else if (event_num == WL_NAN_EVENT_DATAPATH_ESTB) {
			*hal_event_id = GOOGLE_NAN_EVENT_DATA_CONFIRMATION;
			/* The "remote" NDI depends on which role we played */
			if (ev_dp->role == NAN_DP_ROLE_INITIATOR) {
				memcpy(&nan_event_data->responder_ndi, &ev_dp->responder_ndi,
					ETHER_ADDR_LEN);
				WL_TRACE(("REMOTE_NDI: " MACDBG "\n",
					MAC2STRDBG(ev_dp->responder_ndi.octet)));
				WL_TRACE(("Initiator status %d\n", nan_event_data->status));
			} else {
				memcpy(&nan_event_data->responder_ndi, &ev_dp->initiator_ndi,
					ETHER_ADDR_LEN);
				WL_TRACE(("REMOTE_NDI: " MACDBG "\n",
					MAC2STRDBG(ev_dp->initiator_ndi.octet)));
			}
			/* Map FW NDP status to the framework accept/reject codes */
			if (ev_dp->status == NAN_NDP_STATUS_ACCEPT) {
				nan_event_data->status = NAN_DP_REQUEST_ACCEPT;
			} else if (ev_dp->status == NAN_NDP_STATUS_REJECT) {
				nan_event_data->status = NAN_DP_REQUEST_REJECT;
			} else {
				WL_ERR(("%s:Status code = %x not expected\n",
					__FUNCTION__, ev_dp->status));
				ret = BCME_ERROR;
				goto fail;
			}
			WL_TRACE(("Responder status %d\n", nan_event_data->status));
			/* DP established: set this ndp's bit in the active-DP mask */
			wl_cfgnan_update_dp_mask(cfg, true, nan_event_data->ndp_id);
		} else if (event_num == WL_NAN_EVENT_DATAPATH_END) {
			/* Mapping to common struct between DHD and HAL */
			*hal_event_id = GOOGLE_NAN_EVENT_DATA_END;
			wl_cfgnan_update_dp_mask(cfg, false, nan_event_data->ndp_id);
#ifdef WL_NAN_DISC_CACHE
			if (ev_dp->role != NAN_DP_ROLE_INITIATOR) {
				/* Only at Responder side,
				 * If dp is ended,
				 * clear the resp ndp id from the svc info cache
				 */
				svc_info = wl_cfgnan_get_svc_inst(cfg, 0, nan_event_data->ndp_id);
				if (svc_info) {
					for (i = 0; i < NAN_MAX_SVC_INST; i++) {
						if (svc_info->ndp_id[i] == nan_event_data->ndp_id) {
							svc_info->ndp_id[i] = 0;
						}
					}
				} else {
					WL_DBG(("couldn't find entry for ndp id = %d\n",
						nan_event_data->ndp_id));
				}
			}
#endif /* WL_NAN_DISC_CACHE */
		}
	} else {
		/* Follow though, not handling other IDs as of now */
		WL_DBG(("%s:ID = 0x%02x not supported\n", __FUNCTION__, xtlv->id));
	}
fail:
	NAN_DBG_EXIT();
	return ret;
}
5172
5173 static void
5174 wl_nan_print_status(wl_nan_conf_status_t *nstatus)
5175 {
5176 printf("> enabled: %d\n", nstatus->enabled);
5177 printf("> Current NMI: " MACDBG "\n", MAC2STRDBG(nstatus->nmi.octet));
5178 printf("> Current cluster_id: " MACDBG "\n", MAC2STRDBG(nstatus->cid.octet));
5179
5180 switch (nstatus->role) {
5181 case WL_NAN_ROLE_AUTO:
5182 printf("> role: %s (%d)\n", "auto", nstatus->role);
5183 break;
5184 case WL_NAN_ROLE_NON_MASTER_NON_SYNC:
5185 printf("> role: %s (%d)\n", "non-master-non-sync", nstatus->role);
5186 break;
5187 case WL_NAN_ROLE_NON_MASTER_SYNC:
5188 printf("> role: %s (%d)\n", "non-master-sync", nstatus->role);
5189 break;
5190 case WL_NAN_ROLE_MASTER:
5191 printf("> role: %s (%d)\n", "master", nstatus->role);
5192 break;
5193 case WL_NAN_ROLE_ANCHOR_MASTER:
5194 printf("> role: %s (%d)\n", "anchor-master", nstatus->role);
5195 break;
5196 default:
5197 printf("> role: %s (%d)\n", "undefined", nstatus->role);
5198 break;
5199 }
5200
5201 printf("> social channels: %d, %d\n",
5202 nstatus->social_chans[0], nstatus->social_chans[1]);
5203 printf("> master_rank: " NMRSTR "\n", NMR2STR(nstatus->mr));
5204 printf("> amr : " NMRSTR "\n", NMR2STR(nstatus->amr));
5205 printf("> hop_count: %d\n", nstatus->hop_count);
5206 printf("> ambtt: %d\n", nstatus->ambtt);
5207 }
5208
5209 s32
5210 wl_cfgnan_notify_nan_status(struct bcm_cfg80211 *cfg,
5211 bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *event, void *event_data)
5212 {
5213 uint16 data_len;
5214 uint32 event_num;
5215 s32 event_type;
5216 int hal_event_id = 0;
5217 nan_event_data_t *nan_event_data = NULL;
5218 nan_parse_event_ctx_t nan_event_ctx;
5219 uint16 tlvs_offset = 0;
5220 uint16 nan_opts_len = 0;
5221 uint8 *tlv_buf;
5222 s32 ret = BCME_OK;
5223 bcm_xtlv_opts_t xtlv_opt = BCM_IOV_CMD_OPT_ALIGN32;
5224 nan_svc_info_t *svc;
5225
5226 UNUSED_PARAMETER(wl_nan_print_status);
5227 NAN_DBG_ENTER();
5228 NAN_MUTEX_LOCK();
5229
5230 if (!cfg->nan_init_state) {
5231 WL_ERR(("nan is not in initialized state, dropping nan related events\n"));
5232 ret = BCME_OK;
5233 goto exit;
5234 }
5235 if (!event || !event_data) {
5236 WL_ERR(("event data is NULL\n"));
5237 ret = -EINVAL;
5238 goto exit;
5239 }
5240
5241 event_type = ntoh32(event->event_type);
5242 event_num = ntoh32(event->reason);
5243 data_len = ntoh32(event->datalen);
5244 nan_event_data = MALLOCZ(cfg->osh, sizeof(*nan_event_data));
5245 if (!nan_event_data) {
5246 WL_ERR(("%s: memory allocation failed\n", __func__));
5247 goto exit;
5248 }
5249
5250 if (NAN_INVALID_EVENT(event_num)) {
5251 WL_ERR(("unsupported event, num: %d, event type: %d\n", event_num, event_type));
5252 ret = -EINVAL;
5253 goto exit;
5254 }
5255 WL_DBG((">> Nan Event Received: %s (num=%d, len=%d)\n",
5256 nan_event_to_str(event_num), event_num, data_len));
5257
5258 #ifdef WL_NAN_DEBUG
5259 prhex("nan_event_data:", event_data, data_len);
5260 #endif /* WL_NAN_DEBUG */
5261
5262 nan_event_ctx.cfg = cfg;
5263 nan_event_ctx.nan_evt_data = nan_event_data;
5264 /*
5265 * send as preformatted hex string
5266 * EVENT_NAN <event_type> <tlv_hex_string>
5267 */
5268 switch (event_num) {
5269 case WL_NAN_EVENT_START:
5270 case WL_NAN_EVENT_MERGE:
5271 case WL_NAN_EVENT_ROLE: {
5272 /* get nan status info as-is */
5273 bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
5274 wl_nan_conf_status_t *nstatus = (wl_nan_conf_status_t *)xtlv->data;
5275 WL_INFORM_MEM((">> Nan Mac Event Received: %s (num=%d, len=%d)\n",
5276 nan_event_to_str(event_num), event_num, data_len));
5277 WL_INFORM_MEM(("Nan Device Role %s\n", nan_role_to_str(nstatus->role)));
5278 /* Mapping to common struct between DHD and HAL */
5279 nan_event_data->enabled = nstatus->enabled;
5280 memcpy(&nan_event_data->local_nmi, &nstatus->nmi,
5281 ETHER_ADDR_LEN);
5282 memcpy(&nan_event_data->clus_id, &nstatus->cid,
5283 ETHER_ADDR_LEN);
5284 nan_event_data->nan_de_evt_type = event_num;
5285 #ifdef WL_NAN_DEBUG
5286 wl_nan_print_status(nstatus);
5287 #endif /* WL_NAN_DEBUG */
5288 if (event_num == WL_NAN_EVENT_START) {
5289 cfg->nan_enable = true;
5290 OSL_SMP_WMB();
5291 cfg->nancfg.nan_event_recvd = true;
5292 OSL_SMP_WMB();
5293 wake_up(&cfg->nancfg.nan_event_wait);
5294 }
5295 hal_event_id = GOOGLE_NAN_EVENT_DE_EVENT;
5296 break;
5297 }
5298
5299 case WL_NAN_EVENT_STOP: {
5300 WL_INFORM_MEM((">> Nan Mac Stop Event Received\n"));
5301 hal_event_id = GOOGLE_NAN_EVENT_DISABLED;
5302 OSL_SMP_WMB();
5303 cfg->nancfg.nan_event_recvd = true;
5304 OSL_SMP_WMB();
5305 wake_up(&cfg->nancfg.nan_event_wait);
5306 cfg->nancfg.inst_id_start = 0;
5307 memset(cfg->nancfg.svc_inst_id_mask, 0, sizeof(cfg->nancfg.svc_inst_id_mask));
5308 memset(cfg->svc_info, 0, NAN_MAX_SVC_INST * sizeof(nan_svc_info_t));
5309 if (cfg->nancfg.disable_reason == NAN_USER_INITIATED) {
5310 /* do not event to host if command is from host */
5311 goto exit;
5312 } else if (cfg->nancfg.disable_reason == NAN_CONCURRENCY_CONFLICT) {
5313 nan_event_data->status = NAN_STATUS_UNSUPPORTED_CONCURRENCY_NAN_DISABLED;
5314 } else {
5315 nan_event_data->status = NAN_STATUS_SUCCESS;
5316 }
5317 break;
5318 }
5319 case WL_NAN_EVENT_TERMINATED: {
5320 bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
5321 wl_nan_ev_terminated_t *pev = (wl_nan_ev_terminated_t *)xtlv->data;
5322
5323 /* Mapping to common struct between DHD and HAL */
5324 WL_TRACE(("Instance ID: %d\n", pev->instance_id));
5325 nan_event_data->local_inst_id = pev->instance_id;
5326 WL_TRACE(("Service Type: %d\n", pev->svctype));
5327
5328 #ifdef WL_NAN_DISC_CACHE
5329 if (pev->svctype == NAN_SC_SUBSCRIBE) {
5330 wl_cfgnan_remove_disc_result(cfg, pev->instance_id);
5331 }
5332 #endif /* WL_NAN_DISC_CACHE */
5333 /* Mapping reason code of FW to status code of framework */
5334 if (pev->reason == NAN_TERM_REASON_TIMEOUT ||
5335 pev->reason == NAN_TERM_REASON_USER_REQ ||
5336 pev->reason == NAN_TERM_REASON_COUNT_REACHED) {
5337 nan_event_data->status = NAN_STATUS_SUCCESS;
5338 memcpy(nan_event_data->nan_reason, "NAN_STATUS_SUCCESS",
5339 strlen("NAN_STATUS_SUCCESS"));
5340 } else {
5341 nan_event_data->status = NAN_STATUS_INTERNAL_FAILURE;
5342 memcpy(nan_event_data->nan_reason, "NAN_STATUS_INTERNAL_FAILURE",
5343 strlen("NAN_STATUS_INTERNAL_FAILURE"));
5344 }
5345
5346 if (pev->svctype == NAN_SC_SUBSCRIBE) {
5347 hal_event_id = GOOGLE_NAN_EVENT_SUBSCRIBE_TERMINATED;
5348 } else {
5349 hal_event_id = GOOGLE_NAN_EVENT_PUBLISH_TERMINATED;
5350 }
5351 #ifdef WL_NAN_DISC_CACHE
5352 /* terminate ranging sessions */
5353 wl_cfgnan_terminate_ranging_sessions(bcmcfg_to_prmry_ndev(cfg),
5354 cfg, pev->instance_id);
5355 #endif /* WL_NAN_DISC_CACHE */
5356 break;
5357 }
5358
5359 case WL_NAN_EVENT_RECEIVE: {
5360 nan_opts_len = data_len;
5361 hal_event_id = GOOGLE_NAN_EVENT_FOLLOWUP;
5362 xtlv_opt = BCM_IOV_CMD_OPT_ALIGN_NONE;
5363 break;
5364 }
5365
5366 case WL_NAN_EVENT_TXS: {
5367 bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
5368 wl_nan_event_txs_t *txs = (wl_nan_event_txs_t *)xtlv->data;
5369 wl_nan_event_sd_txs_t *txs_sd = NULL;
5370 if (txs->status == WL_NAN_TXS_SUCCESS) {
5371 WL_MEM(("TXS success for type %d token %d",
5372 txs->type, txs->host_seq));
5373 nan_event_data->status = NAN_STATUS_SUCCESS;
5374 memcpy(nan_event_data->nan_reason, "NAN_STATUS_SUCCESS",
5375 strlen("NAN_STATUS_SUCCESS"));
5376 } else {
5377 /* TODO : populate status based on reason codes
5378 For now adding it as no ACK, so that app/framework can retry
5379 */
5380 WL_INFORM_MEM(("TXS failed for type %d status %d token %d",
5381 txs->type, txs->status, txs->host_seq));
5382 nan_event_data->status = NAN_STATUS_NO_OTA_ACK;
5383 memcpy(nan_event_data->nan_reason, "NAN_STATUS_NO_OTA_ACK",
5384 strlen("NAN_STATUS_NO_OTA_ACK"));
5385 }
5386 nan_event_data->reason = txs->reason_code;
5387 nan_event_data->token = txs->host_seq;
5388 if (txs->type == WL_NAN_FRM_TYPE_FOLLOWUP) {
5389 hal_event_id = GOOGLE_NAN_EVENT_TRANSMIT_FOLLOWUP_IND;
5390 xtlv = (bcm_xtlv_t *)(txs->opt_tlvs);
5391 if (txs->opt_tlvs_len && xtlv->id == WL_NAN_XTLV_SD_TXS) {
5392 txs_sd = (wl_nan_event_sd_txs_t*)xtlv->data;
5393 nan_event_data->local_inst_id = txs_sd->inst_id;
5394 } else {
5395 WL_ERR(("Invalid params in TX status for trasnmit followup"));
5396 ret = -EINVAL;
5397 goto exit;
5398 }
5399 } else { /* TODO: add for other frame types if required */
5400 ret = -EINVAL;
5401 goto exit;
5402 }
5403 break;
5404 }
5405
5406 case WL_NAN_EVENT_DISCOVERY_RESULT: {
5407 nan_opts_len = data_len;
5408 hal_event_id = GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH;
5409 xtlv_opt = BCM_IOV_CMD_OPT_ALIGN_NONE;
5410 break;
5411 }
5412 #ifdef WL_NAN_DISC_CACHE
5413 case WL_NAN_EVENT_RNG_RPT_IND: {
5414 bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
5415 wl_nan_ev_rng_rpt_ind_t *range_res = (wl_nan_ev_rng_rpt_ind_t *)xtlv->data;
5416 nan_disc_result_cache *cache;
5417 nan_event_data->ranging_result_present = 1;
5418 nan_event_data->range_measurement_cm = range_res->dist_mm/10;
5419 memcpy(&nan_event_data->remote_nmi, &range_res->peer_m_addr, ETHER_ADDR_LEN);
5420 nan_event_data->ranging_ind = range_res->indication;
5421 WL_TRACE(("ranging ind = %d\n", range_res->indication));
5422 /* check in cache */
5423 cache = wl_cfgnan_get_disc_result(cfg,
5424 0, &range_res->peer_m_addr);
5425 if (!cache) {
5426 ret = BCME_ERROR;
5427 WL_ERR(("Disc Cache entry not present for peer: " MACDBG "\n",
5428 MAC2STRDBG(range_res->peer_m_addr.octet)));
5429 goto exit;
5430 }
5431 WL_TRACE(("Disc cache entry, populate it\n"));
5432 ret = wl_nan_cache_to_event_data(cache,
5433 nan_event_data, cfg->osh);
5434 if (ret != BCME_OK) {
5435 goto exit;
5436 }
5437 break;
5438 }
5439 case WL_NAN_EVENT_RNG_REQ_IND: {
5440 wl_nan_ev_rng_req_ind_t *rng_ind;
5441 bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
5442
5443 nan_opts_len = data_len;
5444 rng_ind = (wl_nan_ev_rng_req_ind_t *)xtlv->data;
5445 xtlv_opt = BCM_IOV_CMD_OPT_ALIGN_NONE;
5446 WL_TRACE(("Received WL_NAN_EVENT_RNG_REQ_IND range_id %d\n",
5447 rng_ind->rng_id));
5448 ret = wl_cfgnan_handle_ranging_ind(cfg, rng_ind);
5449 /* no need to event to HAL */
5450 goto exit;
5451 }
5452
5453 case WL_NAN_EVENT_RNG_TERM_IND: {
5454 bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
5455 nan_ranging_inst_t *rng_inst;
5456 wl_nan_ev_rng_term_ind_t *range_term = (wl_nan_ev_rng_term_ind_t *)xtlv->data;
5457 WL_TRACE(("Peer_NMI: " MACDBG "\n",
5458 MAC2STRDBG(range_term->peer_m_addr.octet)));
5459 WL_TRACE(("Reason code:%d\n", range_term->reason_code));
5460 WL_TRACE(("Received WL_NAN_EVENT_RNG_TERM_IND\n"));
5461 rng_inst = wl_cfgnan_check_for_ranging(cfg, &range_term->peer_m_addr);
5462 if (rng_inst) {
5463 /* clear ranging instance */
5464 WL_TRACE(("reset the ranging instance"));
5465 memset(rng_inst, 0, sizeof(*rng_inst));
5466 }
5467 break;
5468 }
5469 #endif /* WL_NAN_DISC_CACHE */
5470 /*
5471 * Data path events data are received in common event struct,
5472 * Handling all the events as part of one case, hence fall through is intentional
5473 */
5474 case WL_NAN_EVENT_PEER_DATAPATH_IND:
5475 case WL_NAN_EVENT_DATAPATH_ESTB:
5476 case WL_NAN_EVENT_DATAPATH_END: {
5477 ret = wl_nan_dp_cmn_event_data(cfg, event_data, data_len,
5478 &tlvs_offset, &nan_opts_len,
5479 event_num, &hal_event_id, nan_event_data);
5480 /* Avoiding optional param parsing for DP END Event */
5481 if (event_num == WL_NAN_EVENT_DATAPATH_END) {
5482 nan_opts_len = 0;
5483 xtlv_opt = BCM_IOV_CMD_OPT_ALIGN_NONE;
5484 }
5485 if (unlikely(ret)) {
5486 WL_ERR(("nan dp common event data parse failed\n"));
5487 goto exit;
5488 }
5489 break;
5490 }
5491 default:
5492 WL_ERR(("WARNING: unimplemented NAN APP EVENT = %d\n", event_num));
5493 ret = BCME_ERROR;
5494 goto exit;
5495 }
5496
5497 if (nan_opts_len) {
5498 tlv_buf = (uint8 *)event_data + tlvs_offset;
5499 /* Extract event data tlvs and pass their resp to cb fn */
5500 ret = bcm_unpack_xtlv_buf((void *)&nan_event_ctx, (const uint8*)tlv_buf,
5501 nan_opts_len, xtlv_opt, wl_cfgnan_set_vars_cbfn);
5502 if (ret != BCME_OK) {
5503 WL_ERR(("Failed to unpack tlv data, ret=%d\n", ret));
5504 }
5505 }
5506
5507 #ifdef WL_NAN_DISC_CACHE
5508 if (hal_event_id == GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH) {
5509 WL_TRACE(("Cache disc res\n"));
5510 ret = wl_cfgnan_cache_disc_result(cfg, nan_event_data);
5511 if (ret) {
5512 WL_ERR(("Failed to cache disc result ret %d\n", ret));
5513 }
5514 if (nan_event_data->sde_control_flag & NAN_SDE_CF_RANGING_REQUIRED) {
5515 ret = wl_cfgnan_check_disc_res_for_ranging(cfg, nan_event_data);
5516 if (ret == BCME_OK) {
5517 /* disc result to HAL will be given on ranging report */
5518 goto exit;
5519 } else {
5520 /* TODO: should we terminate service if ranging fails ? */
5521 WL_ERR(("Ranging failed or not required"));
5522 }
5523 } else {
5524 WL_TRACE(("Ranging not required\n"));
5525 }
5526 /*
5527 * If tx match filter is present as part of active subscribe, keep same filter
5528 * values in discovery results also.
5529 */
5530 if (nan_event_data->sub_id == nan_event_data->requestor_id) {
5531 svc = wl_cfgnan_get_svc_inst(cfg, nan_event_data->sub_id, 0);
5532 if (svc && svc->tx_match_filter_len) {
5533 nan_event_data->tx_match_filter.dlen = svc->tx_match_filter_len;
5534 nan_event_data->tx_match_filter.data =
5535 MALLOCZ(cfg->osh, svc->tx_match_filter_len);
5536 if (!nan_event_data->tx_match_filter.data) {
5537 WL_ERR(("%s: tx_match_filter_data alloc failed\n",
5538 __FUNCTION__));
5539 nan_event_data->tx_match_filter.dlen = 0;
5540 ret = -ENOMEM;
5541 goto exit;
5542 }
5543 memcpy(nan_event_data->tx_match_filter.data,
5544 svc->tx_match_filter, svc->tx_match_filter_len);
5545 }
5546 }
5547 }
5548 #endif /* WL_NAN_DISC_CACHE */
5549
5550 /* Send up range result as subscribe match event */
5551 if (event_num == WL_NAN_EVENT_RNG_RPT_IND) {
5552 WL_TRACE(("Send up range result as subscribe match event\n"));
5553 hal_event_id = GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH;
5554 }
5555
5556 WL_TRACE(("Send up %s (%d) data to HAL, hal_event_id=%d\n",
5557 nan_event_to_str(event_num), event_num, hal_event_id));
5558 #if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
5559 ret = wl_cfgvendor_send_nan_event(cfg->wdev->wiphy, bcmcfg_to_prmry_ndev(cfg),
5560 hal_event_id, nan_event_data);
5561 if (ret != BCME_OK) {
5562 WL_ERR(("Failed to send event to nan hal, %s (%d)\n",
5563 nan_event_to_str(event_num), event_num));
5564 }
5565 #endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
5566
5567 exit:
5568 if (nan_event_data) {
5569 if (nan_event_data->tx_match_filter.data) {
5570 MFREE(cfg->osh, nan_event_data->tx_match_filter.data,
5571 nan_event_data->tx_match_filter.dlen);
5572 nan_event_data->tx_match_filter.data = NULL;
5573 }
5574 if (nan_event_data->rx_match_filter.data) {
5575 MFREE(cfg->osh, nan_event_data->rx_match_filter.data,
5576 nan_event_data->rx_match_filter.dlen);
5577 nan_event_data->rx_match_filter.data = NULL;
5578 }
5579 if (nan_event_data->svc_info.data) {
5580 MFREE(cfg->osh, nan_event_data->svc_info.data,
5581 nan_event_data->svc_info.dlen);
5582 nan_event_data->svc_info.data = NULL;
5583 }
5584 if (nan_event_data->sde_svc_info.data) {
5585 MFREE(cfg->osh, nan_event_data->sde_svc_info.data,
5586 nan_event_data->sde_svc_info.dlen);
5587 nan_event_data->sde_svc_info.data = NULL;
5588 }
5589 MFREE(cfg->osh, nan_event_data, sizeof(*nan_event_data));
5590 }
5591
5592 NAN_MUTEX_UNLOCK();
5593 NAN_DBG_EXIT();
5594 return ret;
5595 }
5596
5597 #ifdef WL_NAN_DISC_CACHE
5598 static int
5599 wl_cfgnan_cache_disc_result(struct bcm_cfg80211 *cfg, void * data)
5600 {
5601 nan_event_data_t* disc = (nan_event_data_t*)data;
5602 int i, add_index = 0;
5603 int ret = BCME_OK;
5604 nan_disc_result_cache *disc_res = cfg->nan_disc_cache;
5605
5606 if (!cfg->nan_enable) {
5607 WL_DBG(("nan not enabled"));
5608 return BCME_NOTENABLED;
5609 }
5610 if (cfg->nan_disc_count == NAN_MAX_CACHE_DISC_RESULT) {
5611 WL_DBG(("cache full"));
5612 ret = BCME_NORESOURCE;
5613 goto done;
5614 }
5615
5616 for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
5617 if (!disc_res[i].valid) {
5618 add_index = i;
5619 continue;
5620 }
5621 if (!memcmp(&disc_res[i].peer, &disc->remote_nmi, ETHER_ADDR_LEN) &&
5622 !memcmp(disc_res[i].svc_hash, disc->svc_name, WL_NAN_SVC_HASH_LEN)) {
5623 WL_TRACE(("cache entry already present"));
5624 ret = BCME_OK; /* entry already present */
5625 goto done;
5626 }
5627 }
5628 WL_TRACE(("adding cache entry"));
5629 disc_res[add_index].valid = 1;
5630 disc_res[add_index].pub_id = disc->pub_id;
5631 disc_res[add_index].sub_id = disc->sub_id;
5632 disc_res[add_index].publish_rssi = disc->publish_rssi;
5633 disc_res[add_index].peer_cipher_suite = disc->peer_cipher_suite;
5634 memcpy(&disc_res[add_index].peer, &disc->remote_nmi, ETHER_ADDR_LEN);
5635 memcpy(disc_res[add_index].svc_hash, disc->svc_name, WL_NAN_SVC_HASH_LEN);
5636
5637 if (disc->svc_info.dlen && disc->svc_info.data) {
5638 disc_res[add_index].svc_info.dlen = disc->svc_info.dlen;
5639 disc_res[add_index].svc_info.data =
5640 MALLOCZ(cfg->osh, disc_res[add_index].svc_info.dlen);
5641 if (!disc_res[add_index].svc_info.data) {
5642 WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
5643 disc_res[add_index].svc_info.dlen = 0;
5644 ret = BCME_NOMEM;
5645 goto done;
5646 }
5647 memcpy(disc_res[add_index].svc_info.data,
5648 disc->svc_info.data, disc->svc_info.dlen);
5649 }
5650 if (disc->tx_match_filter.dlen && disc->tx_match_filter.data) {
5651 disc_res[add_index].tx_match_filter.dlen = disc->tx_match_filter.dlen;
5652 disc_res[add_index].tx_match_filter.data =
5653 MALLOCZ(cfg->osh, disc_res[add_index].tx_match_filter.dlen);
5654 if (!disc_res[add_index].tx_match_filter.data) {
5655 WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
5656 disc_res[add_index].tx_match_filter.dlen = 0;
5657 ret = BCME_NOMEM;
5658 goto done;
5659 }
5660 memcpy(disc_res[add_index].tx_match_filter.data,
5661 disc->tx_match_filter.data, disc->tx_match_filter.dlen);
5662 }
5663 cfg->nan_disc_count++;
5664
5665 done:
5666 return ret;
5667 }
5668
5669 static int wl_cfgnan_remove_disc_result(struct bcm_cfg80211 *cfg,
5670 uint8 local_subid)
5671 {
5672 int i;
5673 int ret = BCME_NOTFOUND;
5674 nan_disc_result_cache *disc_res = cfg->nan_disc_cache;
5675 if (!cfg->nan_enable) {
5676 WL_DBG(("nan not enabled\n"));
5677 ret = BCME_NOTENABLED;
5678 goto done;
5679 }
5680 for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
5681 if (disc_res[i].sub_id == local_subid) {
5682 WL_TRACE(("make cache entry invalid\n"));
5683 disc_res[i].valid = 0;
5684 cfg->nan_disc_count--;
5685 ret = BCME_OK;
5686 }
5687 }
5688 WL_DBG(("couldn't find entry\n"));
5689 done:
5690 return ret;
5691 }
5692
5693 static nan_disc_result_cache *
5694 wl_cfgnan_get_disc_result(struct bcm_cfg80211 *cfg, uint8 remote_pubid,
5695 struct ether_addr *peer)
5696 {
5697 int i;
5698 nan_disc_result_cache *disc_res = cfg->nan_disc_cache;
5699 if (remote_pubid) {
5700 for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
5701 if ((disc_res[i].pub_id == remote_pubid) &&
5702 !memcmp(&disc_res[i].peer, peer, ETHER_ADDR_LEN)) {
5703 WL_TRACE(("Found entry"));
5704 return &disc_res[i];
5705 }
5706 }
5707 } else {
5708 for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
5709 if (!memcmp(&disc_res[i].peer, peer, ETHER_ADDR_LEN)) {
5710 WL_TRACE(("Found entry"));
5711 return &disc_res[i];
5712 }
5713 }
5714 }
5715 return NULL;
5716 }
5717 #endif /* WL_NAN_DISC_CACHE */
5718
5719 static void
5720 wl_cfgnan_update_dp_mask(struct bcm_cfg80211 *cfg, bool enable, u8 nan_dp_id)
5721 {
5722 #ifdef ARP_OFFLOAD_SUPPORT
5723 dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
5724 #endif /* ARP_OFFLOAD_SUPPORT */
5725 /* As of now, we don't see a need to know which ndp is active.
5726 * so just keep tracking of ndp via count. If we need to know
5727 * the status of each ndp based on ndp id, we need to change
5728 * this implementation to use a bit mask.
5729 */
5730 if (!dhd) {
5731 WL_ERR(("dhd pub null!\n"));
5732 return;
5733 }
5734
5735 if (enable) {
5736 /* On first NAN DP indication, disable ARP. */
5737 #ifdef ARP_OFFLOAD_SUPPORT
5738 if (!cfg->nan_dp_mask) {
5739 dhd_arp_offload_set(dhd, 0);
5740 dhd_arp_offload_enable(dhd, false);
5741 }
5742 #endif /* ARP_OFFLOAD_SUPPORT */
5743 cfg->nan_dp_mask |= (0x1 << nan_dp_id);
5744 } else {
5745 cfg->nan_dp_mask &= ~(0x1 << nan_dp_id);
5746 #ifdef ARP_OFFLOAD_SUPPORT
5747 if (!cfg->nan_dp_mask) {
5748 /* If NAN DP count becomes zero and if there
5749 * are no conflicts, enable back ARP offload.
5750 * As of now, the conflicting interfaces are AP
5751 * and P2P. But NAN + P2P/AP concurrency is not
5752 * supported.
5753 */
5754 dhd_arp_offload_set(dhd, dhd_arp_mode);
5755 dhd_arp_offload_enable(dhd, true);
5756 }
5757 #endif /* ARP_OFFLOAD_SUPPORT */
5758 }
5759 WL_INFORM_MEM(("NAN_DP_MASK:0x%x\n", cfg->nan_dp_mask));
5760 }
5761
5762 bool
5763 wl_cfgnan_is_dp_active(struct net_device *ndev)
5764 {
5765 struct bcm_cfg80211 *cfg;
5766 bool nan_dp;
5767
5768 if (!ndev || !ndev->ieee80211_ptr) {
5769 WL_ERR(("ndev/wdev null\n"));
5770 return false;
5771 }
5772
5773 cfg = wiphy_priv(ndev->ieee80211_ptr->wiphy);
5774 nan_dp = cfg->nan_dp_mask ? true : false;
5775
5776 WL_DBG(("NAN DP status:%d\n", nan_dp));
5777 return nan_dp;
5778 }
5779
5780 s32
5781 wl_cfgnan_get_ndi_idx(struct bcm_cfg80211 *cfg)
5782 {
5783 int i;
5784 for (i = 0; i < NAN_MAX_NDI; i++) {
5785 if (!cfg->nancfg.ndi[i].in_use) {
5786 /* Free interface, use it */
5787 return i;
5788 }
5789 }
5790 /* Don't have a free interface */
5791 return WL_INVALID;
5792 }
5793
/*
 * Record the interface name for NDI slot 'idx' and mark the slot in use
 * (created stays false until the netdev is actually registered).
 *
 * Returns -EINVAL on bad arguments.
 * NOTE(review): on success this still returns WL_INVALID — the trailing
 * comment/return look copy-pasted from wl_cfgnan_get_ndi_idx(). Callers
 * appear not to use the success value; confirm before changing it to idx
 * or BCME_OK.
 */
s32
wl_cfgnan_add_ndi_data(struct bcm_cfg80211 *cfg, s32 idx, char *name)
{
	u16 len;
	if (!name || (idx < 0) || (idx >= NAN_MAX_NDI)) {
		return -EINVAL;
	}

	/* Ensure ifname string size <= IFNAMSIZ including null termination */
	len = MIN(strlen(name), (IFNAMSIZ - 1));
	strncpy(cfg->nancfg.ndi[idx].ifname, name, len);
	cfg->nancfg.ndi[idx].ifname[len] = '\0';
	cfg->nancfg.ndi[idx].in_use = true;
	cfg->nancfg.ndi[idx].created = false;

	/* Don't have a free interface */
	return WL_INVALID;
}
5812
5813 s32
5814 wl_cfgnan_del_ndi_data(struct bcm_cfg80211 *cfg, char *name)
5815 {
5816 u16 len;
5817 int i;
5818 if (!name) {
5819 return -EINVAL;
5820 }
5821
5822 len = MIN(strlen(name), IFNAMSIZ);
5823 for (i = 0; i < NAN_MAX_NDI; i++) {
5824 if (strncmp(cfg->nancfg.ndi[i].ifname, name, len) == 0) {
5825 memset(&cfg->nancfg.ndi[i].ifname, 0x0, IFNAMSIZ);
5826 cfg->nancfg.ndi[i].in_use = false;
5827 cfg->nancfg.ndi[i].created = false;
5828 return i;
5829 }
5830 }
5831 return -EINVAL;
5832 }
5833
5834 struct wl_ndi_data *
5835 wl_cfgnan_get_ndi_data(struct bcm_cfg80211 *cfg, char *name)
5836 {
5837 u16 len;
5838 int i;
5839 if (!name) {
5840 return NULL;
5841 }
5842
5843 len = MIN(strlen(name), IFNAMSIZ);
5844 for (i = 0; i < NAN_MAX_NDI; i++) {
5845 if (strncmp(cfg->nancfg.ndi[i].ifname, name, len) == 0) {
5846 return &cfg->nancfg.ndi[i];
5847 }
5848 }
5849 return NULL;
5850 }
5851 #endif /* WL_NAN */