wlcore: remove unused set_power method
drivers/net/wireless/ti/wlcore/main.c
1
2 /*
3 * This file is part of wl1271
4 *
5 * Copyright (C) 2008-2010 Nokia Corporation
6 *
7 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
21 * 02110-1301 USA
22 *
23 */
24
25 #include <linux/module.h>
26 #include <linux/firmware.h>
27 #include <linux/delay.h>
28 #include <linux/spi/spi.h>
29 #include <linux/crc32.h>
30 #include <linux/etherdevice.h>
31 #include <linux/vmalloc.h>
32 #include <linux/platform_device.h>
33 #include <linux/slab.h>
34 #include <linux/wl12xx.h>
35 #include <linux/sched.h>
36 #include <linux/interrupt.h>
37
38 #include "wlcore.h"
39 #include "debug.h"
40 #include "wl12xx_80211.h"
41 #include "io.h"
42 #include "event.h"
43 #include "tx.h"
44 #include "rx.h"
45 #include "ps.h"
46 #include "init.h"
47 #include "debugfs.h"
48 #include "cmd.h"
49 #include "boot.h"
50 #include "testmode.h"
51 #include "scan.h"
52 #include "hw_ops.h"
53
54 #define WL1271_BOOT_RETRIES 3
55
58 static char *fwlog_param;
59 static int bug_on_recovery = -1;
60 static int no_recovery = -1;
61
62 static void __wl1271_op_remove_interface(struct wl1271 *wl,
63 struct ieee80211_vif *vif,
64 bool reset_tx_queues);
65 static void wlcore_op_stop_locked(struct wl1271 *wl);
66 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
67
68 static int wl12xx_set_authorized(struct wl1271 *wl,
69 struct wl12xx_vif *wlvif)
70 {
71 int ret;
72
73 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
74 return -EINVAL;
75
76 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
77 return 0;
78
79 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
80 return 0;
81
82 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
83 if (ret < 0)
84 return ret;
85
86 wl1271_info("Association completed.");
87 return 0;
88 }
89
90 static void wl1271_reg_notify(struct wiphy *wiphy,
91 struct regulatory_request *request)
92 {
93 struct ieee80211_supported_band *band;
94 struct ieee80211_channel *ch;
95 int i;
96 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
97 struct wl1271 *wl = hw->priv;
98
99 band = wiphy->bands[IEEE80211_BAND_5GHZ];
100 for (i = 0; i < band->n_channels; i++) {
101 ch = &band->channels[i];
102 if (ch->flags & IEEE80211_CHAN_DISABLED)
103 continue;
104
105 if (ch->flags & IEEE80211_CHAN_RADAR)
106 ch->flags |= IEEE80211_CHAN_NO_IBSS |
107 IEEE80211_CHAN_PASSIVE_SCAN;
108
109 }
110
111 if (likely(wl->state == WLCORE_STATE_ON))
112 wlcore_regdomain_config(wl);
113 }
114
115 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
116 bool enable)
117 {
118 int ret = 0;
119
120 /* we should hold wl->mutex */
121 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
122 if (ret < 0)
123 goto out;
124
125 if (enable)
126 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
127 else
128 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
129 out:
130 return ret;
131 }
132
133 /*
134 * this function is being called when the rx_streaming interval
135 * has been changed or rx_streaming should be disabled
136 */
137 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
138 {
139 int ret = 0;
140 int period = wl->conf.rx_streaming.interval;
141
142 /* don't reconfigure if rx_streaming is disabled */
143 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
144 goto out;
145
146 /* reconfigure/disable according to new streaming_period */
147 if (period &&
148 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
149 (wl->conf.rx_streaming.always ||
150 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
151 ret = wl1271_set_rx_streaming(wl, wlvif, true);
152 else {
153 ret = wl1271_set_rx_streaming(wl, wlvif, false);
154 /* don't cancel_work_sync since we might deadlock */
155 del_timer_sync(&wlvif->rx_streaming_timer);
156 }
157 out:
158 return ret;
159 }
160
161 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
162 {
163 int ret;
164 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
165 rx_streaming_enable_work);
166 struct wl1271 *wl = wlvif->wl;
167
168 mutex_lock(&wl->mutex);
169
170 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
171 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
172 (!wl->conf.rx_streaming.always &&
173 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
174 goto out;
175
176 if (!wl->conf.rx_streaming.interval)
177 goto out;
178
179 ret = wl1271_ps_elp_wakeup(wl);
180 if (ret < 0)
181 goto out;
182
183 ret = wl1271_set_rx_streaming(wl, wlvif, true);
184 if (ret < 0)
185 goto out_sleep;
186
187 /* stop it after some time of inactivity */
188 mod_timer(&wlvif->rx_streaming_timer,
189 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
190
191 out_sleep:
192 wl1271_ps_elp_sleep(wl);
193 out:
194 mutex_unlock(&wl->mutex);
195 }
196
197 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
198 {
199 int ret;
200 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
201 rx_streaming_disable_work);
202 struct wl1271 *wl = wlvif->wl;
203
204 mutex_lock(&wl->mutex);
205
206 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
207 goto out;
208
209 ret = wl1271_ps_elp_wakeup(wl);
210 if (ret < 0)
211 goto out;
212
213 ret = wl1271_set_rx_streaming(wl, wlvif, false);
214 if (ret)
215 goto out_sleep;
216
217 out_sleep:
218 wl1271_ps_elp_sleep(wl);
219 out:
220 mutex_unlock(&wl->mutex);
221 }
222
223 static void wl1271_rx_streaming_timer(unsigned long data)
224 {
225 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
226 struct wl1271 *wl = wlvif->wl;
227 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
228 }
229
230 /* wl->mutex must be taken */
231 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
232 {
233 /* if the watchdog is not armed, don't do anything */
234 if (wl->tx_allocated_blocks == 0)
235 return;
236
237 cancel_delayed_work(&wl->tx_watchdog_work);
238 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
239 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
240 }
241
242 static void wl12xx_tx_watchdog_work(struct work_struct *work)
243 {
244 struct delayed_work *dwork;
245 struct wl1271 *wl;
246
247 dwork = container_of(work, struct delayed_work, work);
248 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
249
250 mutex_lock(&wl->mutex);
251
252 if (unlikely(wl->state != WLCORE_STATE_ON))
253 goto out;
254
255 /* Tx went out in the meantime - everything is ok */
256 if (unlikely(wl->tx_allocated_blocks == 0))
257 goto out;
258
259 /*
260 * if a ROC is in progress, we might not have any Tx for a long
261 * time (e.g. pending Tx on the non-ROC channels)
262 */
263 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
264 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
265 wl->conf.tx.tx_watchdog_timeout);
266 wl12xx_rearm_tx_watchdog_locked(wl);
267 goto out;
268 }
269
270 /*
271 * if a scan is in progress, we might not have any Tx for a long
272 * time
273 */
274 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
275 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
276 wl->conf.tx.tx_watchdog_timeout);
277 wl12xx_rearm_tx_watchdog_locked(wl);
278 goto out;
279 }
280
281 /*
282 * AP might cache a frame for a long time for a sleeping station,
283 * so rearm the timer if there's an AP interface with stations. If
284 * Tx is genuinely stuck, we will hopefully discover it when all
285 * stations are removed due to inactivity.
286 */
287 if (wl->active_sta_count) {
288 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
289 " %d stations",
290 wl->conf.tx.tx_watchdog_timeout,
291 wl->active_sta_count);
292 wl12xx_rearm_tx_watchdog_locked(wl);
293 goto out;
294 }
295
296 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
297 wl->conf.tx.tx_watchdog_timeout);
298 wl12xx_queue_recovery_work(wl);
299
300 out:
301 mutex_unlock(&wl->mutex);
302 }
303
304 static void wlcore_adjust_conf(struct wl1271 *wl)
305 {
306 /* Adjust settings according to optional module parameters */
307
308 if (fwlog_param) {
309 if (!strcmp(fwlog_param, "continuous")) {
310 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
311 } else if (!strcmp(fwlog_param, "ondemand")) {
312 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
313 } else if (!strcmp(fwlog_param, "dbgpins")) {
314 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
315 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
316 } else if (!strcmp(fwlog_param, "disable")) {
317 wl->conf.fwlog.mem_blocks = 0;
318 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
319 } else {
320 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
321 }
322 }
323
324 if (bug_on_recovery != -1)
325 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
326
327 if (no_recovery != -1)
328 wl->conf.recovery.no_recovery = (u8) no_recovery;
329 }
330
331 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
332 struct wl12xx_vif *wlvif,
333 u8 hlid, u8 tx_pkts)
334 {
335 bool fw_ps, single_link;
336
337 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
338 single_link = (wl->active_link_count == 1);
339
340 /*
341 * Wake up from high-level PS if the STA is asleep with too few
342 * packets in FW or if the STA is awake.
343 */
344 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
345 wl12xx_ps_link_end(wl, wlvif, hlid);
346
347 /*
348 * Start high-level PS if the STA is asleep with enough blocks in FW.
349 * Make an exception if this is the only connected link. In this
350 * case FW-memory congestion is less of a problem.
351 */
352 else if (!single_link && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
353 wl12xx_ps_link_start(wl, wlvif, hlid, true);
354 }
355
356 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
357 struct wl12xx_vif *wlvif,
358 struct wl_fw_status_2 *status)
359 {
360 u32 cur_fw_ps_map;
361 u8 hlid;
362
363 cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
364 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
365 wl1271_debug(DEBUG_PSM,
366 "link ps prev 0x%x cur 0x%x changed 0x%x",
367 wl->ap_fw_ps_map, cur_fw_ps_map,
368 wl->ap_fw_ps_map ^ cur_fw_ps_map);
369
370 wl->ap_fw_ps_map = cur_fw_ps_map;
371 }
372
373 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS)
374 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
375 wl->links[hlid].allocated_pkts);
376 }
377
378 static int wlcore_fw_status(struct wl1271 *wl,
379 struct wl_fw_status_1 *status_1,
380 struct wl_fw_status_2 *status_2)
381 {
382 struct wl12xx_vif *wlvif;
383 struct timespec ts;
384 u32 old_tx_blk_count = wl->tx_blocks_available;
385 int avail, freed_blocks;
386 int i;
387 size_t status_len;
388 int ret;
389 struct wl1271_link *lnk;
390
391 status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
392 sizeof(*status_2) + wl->fw_status_priv_len;
393
394 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status_1,
395 status_len, false);
396 if (ret < 0)
397 return ret;
398
399 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
400 "drv_rx_counter = %d, tx_results_counter = %d)",
401 status_1->intr,
402 status_1->fw_rx_counter,
403 status_1->drv_rx_counter,
404 status_1->tx_results_counter);
405
406 for (i = 0; i < NUM_TX_QUEUES; i++) {
407 /* prevent wrap-around in freed-packets counter */
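/*
 * Worked example of the 8-bit wrap handling below: if the last recorded
 * counter was 250 and the FW now reports 4, (4 - 250) & 0xff == 10, so
 * ten packets are subtracted from tx_allocated_pkts.
 */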
408 wl->tx_allocated_pkts[i] -=
409 (status_2->counters.tx_released_pkts[i] -
410 wl->tx_pkts_freed[i]) & 0xff;
411
412 wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
413 }
414
415
416 for_each_set_bit(i, wl->links_map, WL12XX_MAX_LINKS) {
417 lnk = &wl->links[i];
418 /* prevent wrap-around in freed-packets counter */
419 lnk->allocated_pkts -=
420 (status_2->counters.tx_lnk_free_pkts[i] -
421 lnk->prev_freed_pkts) & 0xff;
422
423 lnk->prev_freed_pkts = status_2->counters.tx_lnk_free_pkts[i];
424 }
425
426 /* prevent wrap-around in total blocks counter */
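/*
 * Worked example: if tx_blocks_freed was 0xfffffff0 and the FW now
 * reports total_released_blks == 0x10, the counter wrapped, so
 * freed_blocks is 0x100000000 - 0xfffffff0 + 0x10 = 0x20 blocks.
 */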
427 if (likely(wl->tx_blocks_freed <=
428 le32_to_cpu(status_2->total_released_blks)))
429 freed_blocks = le32_to_cpu(status_2->total_released_blks) -
430 wl->tx_blocks_freed;
431 else
432 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
433 le32_to_cpu(status_2->total_released_blks);
434
435 wl->tx_blocks_freed = le32_to_cpu(status_2->total_released_blks);
436
437 wl->tx_allocated_blocks -= freed_blocks;
438
439 /*
440 * If the FW freed some blocks:
441 * If we still have allocated blocks - re-arm the timer, Tx is
442 * not stuck. Otherwise, cancel the timer (no Tx currently).
443 */
444 if (freed_blocks) {
445 if (wl->tx_allocated_blocks)
446 wl12xx_rearm_tx_watchdog_locked(wl);
447 else
448 cancel_delayed_work(&wl->tx_watchdog_work);
449 }
450
451 avail = le32_to_cpu(status_2->tx_total) - wl->tx_allocated_blocks;
452
453 /*
454 * The FW might change the total number of TX memblocks before
455 * we get a notification about blocks being released. Thus, the
456 * available blocks calculation might yield a temporary result
457 * which is lower than the actual available blocks. Keeping in
458 * mind that only blocks that were allocated can be moved from
459 * TX to RX, tx_blocks_available should never decrease here.
460 */
461 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
462 avail);
463
464 /* if more blocks are available now, tx work can be scheduled */
465 if (wl->tx_blocks_available > old_tx_blk_count)
466 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
467
468 /* for AP update num of allocated TX blocks per link and ps status */
469 wl12xx_for_each_wlvif_ap(wl, wlvif) {
470 wl12xx_irq_update_links_status(wl, wlvif, status_2);
471 }
472
473 /* update the host-chipset time offset */
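/*
 * The shift by 10 divides nanoseconds by 1024 - a cheap approximation of
 * a ns-to-us conversion - so the offset is kept in roughly the same units
 * as the firmware's fw_localtime counter that is subtracted below.
 */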
474 getnstimeofday(&ts);
475 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
476 (s64)le32_to_cpu(status_2->fw_localtime);
477
478 wl->fw_fast_lnk_map = le32_to_cpu(status_2->link_fast_bitmap);
479
480 return 0;
481 }
482
483 static void wl1271_flush_deferred_work(struct wl1271 *wl)
484 {
485 struct sk_buff *skb;
486
487 /* Pass all received frames to the network stack */
488 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
489 ieee80211_rx_ni(wl->hw, skb);
490
491 /* Return sent skbs to the network stack */
492 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
493 ieee80211_tx_status_ni(wl->hw, skb);
494 }
495
496 static void wl1271_netstack_work(struct work_struct *work)
497 {
498 struct wl1271 *wl =
499 container_of(work, struct wl1271, netstack_work);
500
501 do {
502 wl1271_flush_deferred_work(wl);
503 } while (skb_queue_len(&wl->deferred_rx_queue));
504 }
505
506 #define WL1271_IRQ_MAX_LOOPS 256
507
508 static int wlcore_irq_locked(struct wl1271 *wl)
509 {
510 int ret = 0;
511 u32 intr;
512 int loopcount = WL1271_IRQ_MAX_LOOPS;
513 bool done = false;
514 unsigned int defer_count;
515 unsigned long flags;
516
517 /*
518 * In case edge triggered interrupt must be used, we cannot iterate
519 * more than once without introducing race conditions with the hardirq.
520 */
521 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
522 loopcount = 1;
523
524 wl1271_debug(DEBUG_IRQ, "IRQ work");
525
526 if (unlikely(wl->state != WLCORE_STATE_ON))
527 goto out;
528
529 ret = wl1271_ps_elp_wakeup(wl);
530 if (ret < 0)
531 goto out;
532
533 while (!done && loopcount--) {
534 /*
535 * In order to avoid a race with the hardirq, clear the flag
536 * before acknowledging the chip. Since the mutex is held,
537 * wl1271_ps_elp_wakeup cannot be called concurrently.
538 */
539 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
540 smp_mb__after_clear_bit();
541
542 ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
543 if (ret < 0)
544 goto out;
545
546 wlcore_hw_tx_immediate_compl(wl);
547
548 intr = le32_to_cpu(wl->fw_status_1->intr);
549 intr &= WLCORE_ALL_INTR_MASK;
550 if (!intr) {
551 done = true;
552 continue;
553 }
554
555 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
556 wl1271_error("HW watchdog interrupt received! starting recovery.");
557 wl->watchdog_recovery = true;
558 ret = -EIO;
559
560 /* restarting the chip. ignore any other interrupt. */
561 goto out;
562 }
563
564 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
565 wl1271_error("SW watchdog interrupt received! "
566 "starting recovery.");
567 wl->watchdog_recovery = true;
568 ret = -EIO;
569
570 /* restarting the chip. ignore any other interrupt. */
571 goto out;
572 }
573
574 if (likely(intr & WL1271_ACX_INTR_DATA)) {
575 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
576
577 ret = wlcore_rx(wl, wl->fw_status_1);
578 if (ret < 0)
579 goto out;
580
581 /* Check if any tx blocks were freed */
582 spin_lock_irqsave(&wl->wl_lock, flags);
583 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
584 wl1271_tx_total_queue_count(wl) > 0) {
585 spin_unlock_irqrestore(&wl->wl_lock, flags);
586 /*
587 * In order to avoid starvation of the TX path,
588 * call the work function directly.
589 */
590 ret = wlcore_tx_work_locked(wl);
591 if (ret < 0)
592 goto out;
593 } else {
594 spin_unlock_irqrestore(&wl->wl_lock, flags);
595 }
596
597 /* check for tx results */
598 ret = wlcore_hw_tx_delayed_compl(wl);
599 if (ret < 0)
600 goto out;
601
602 /* Make sure the deferred queues don't get too long */
603 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
604 skb_queue_len(&wl->deferred_rx_queue);
605 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
606 wl1271_flush_deferred_work(wl);
607 }
608
609 if (intr & WL1271_ACX_INTR_EVENT_A) {
610 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
611 ret = wl1271_event_handle(wl, 0);
612 if (ret < 0)
613 goto out;
614 }
615
616 if (intr & WL1271_ACX_INTR_EVENT_B) {
617 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
618 ret = wl1271_event_handle(wl, 1);
619 if (ret < 0)
620 goto out;
621 }
622
623 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
624 wl1271_debug(DEBUG_IRQ,
625 "WL1271_ACX_INTR_INIT_COMPLETE");
626
627 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
628 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
629 }
630
631 wl1271_ps_elp_sleep(wl);
632
633 out:
634 return ret;
635 }
636
637 static irqreturn_t wlcore_irq(int irq, void *cookie)
638 {
639 int ret;
640 unsigned long flags;
641 struct wl1271 *wl = cookie;
642
643 /* TX might be handled here, avoid redundant work */
644 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
645 cancel_work_sync(&wl->tx_work);
646
647 mutex_lock(&wl->mutex);
648
649 ret = wlcore_irq_locked(wl);
650 if (ret)
651 wl12xx_queue_recovery_work(wl);
652
653 spin_lock_irqsave(&wl->wl_lock, flags);
654 /* In case TX was not handled here, queue TX work */
655 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
656 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
657 wl1271_tx_total_queue_count(wl) > 0)
658 ieee80211_queue_work(wl->hw, &wl->tx_work);
659 spin_unlock_irqrestore(&wl->wl_lock, flags);
660
661 mutex_unlock(&wl->mutex);
662
663 return IRQ_HANDLED;
664 }
665
666 struct vif_counter_data {
667 u8 counter;
668
669 struct ieee80211_vif *cur_vif;
670 bool cur_vif_running;
671 };
672
673 static void wl12xx_vif_count_iter(void *data, u8 *mac,
674 struct ieee80211_vif *vif)
675 {
676 struct vif_counter_data *counter = data;
677
678 counter->counter++;
679 if (counter->cur_vif == vif)
680 counter->cur_vif_running = true;
681 }
682
683 /* caller must not hold wl->mutex, as it might deadlock */
684 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
685 struct ieee80211_vif *cur_vif,
686 struct vif_counter_data *data)
687 {
688 memset(data, 0, sizeof(*data));
689 data->cur_vif = cur_vif;
690
691 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
692 wl12xx_vif_count_iter, data);
693 }
694
695 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
696 {
697 const struct firmware *fw;
698 const char *fw_name;
699 enum wl12xx_fw_type fw_type;
700 int ret;
701
702 if (plt) {
703 fw_type = WL12XX_FW_TYPE_PLT;
704 fw_name = wl->plt_fw_name;
705 } else {
706 /*
707 * we can't call wl12xx_get_vif_count() here because
708 * wl->mutex is taken, so use the cached last_vif_count value
709 */
710 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
711 fw_type = WL12XX_FW_TYPE_MULTI;
712 fw_name = wl->mr_fw_name;
713 } else {
714 fw_type = WL12XX_FW_TYPE_NORMAL;
715 fw_name = wl->sr_fw_name;
716 }
717 }
718
719 if (wl->fw_type == fw_type)
720 return 0;
721
722 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
723
724 ret = request_firmware(&fw, fw_name, wl->dev);
725
726 if (ret < 0) {
727 wl1271_error("could not get firmware %s: %d", fw_name, ret);
728 return ret;
729 }
730
731 if (fw->size % 4) {
732 wl1271_error("firmware size is not a multiple of 32 bits: %zu",
733 fw->size);
734 ret = -EILSEQ;
735 goto out;
736 }
737
738 vfree(wl->fw);
739 wl->fw_type = WL12XX_FW_TYPE_NONE;
740 wl->fw_len = fw->size;
741 wl->fw = vmalloc(wl->fw_len);
742
743 if (!wl->fw) {
744 wl1271_error("could not allocate memory for the firmware");
745 ret = -ENOMEM;
746 goto out;
747 }
748
749 memcpy(wl->fw, fw->data, wl->fw_len);
750 ret = 0;
751 wl->fw_type = fw_type;
752 out:
753 release_firmware(fw);
754
755 return ret;
756 }
757
758 void wl12xx_queue_recovery_work(struct wl1271 *wl)
759 {
760 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
761
762 /* Avoid a recursive recovery */
763 if (wl->state == WLCORE_STATE_ON) {
764 wl->state = WLCORE_STATE_RESTARTING;
765 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
766 wlcore_disable_interrupts_nosync(wl);
767 ieee80211_queue_work(wl->hw, &wl->recovery_work);
768 }
769 }
770
771 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
772 {
773 size_t len = 0;
774
775 /* The FW log is a length-value list, find where the log ends */
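/*
 * Layout example: a block holding [3 a b c][2 x y][0 ...] advances len
 * 0 -> 4 -> 7 and stops at the zero-length terminator (or as soon as the
 * next entry would overrun maxlen).
 */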
776 while (len < maxlen) {
777 if (memblock[len] == 0)
778 break;
779 if (len + memblock[len] + 1 > maxlen)
780 break;
781 len += memblock[len] + 1;
782 }
783
784 /* Make sure we have enough room */
785 len = min(len, (size_t)(PAGE_SIZE - wl->fwlog_size));
786
787 /* Fill the FW log file, consumed by the sysfs fwlog entry */
788 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
789 wl->fwlog_size += len;
790
791 return len;
792 }
793
794 #define WLCORE_FW_LOG_END 0x2000000
795
796 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
797 {
798 u32 addr;
799 u32 offset;
800 u32 end_of_log;
801 u8 *block;
802 int ret;
803
804 if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
805 (wl->conf.fwlog.mem_blocks == 0))
806 return;
807
808 wl1271_info("Reading FW panic log");
809
810 block = kmalloc(WL12XX_HW_BLOCK_SIZE, GFP_KERNEL);
811 if (!block)
812 return;
813
814 /*
815 * Make sure the chip is awake and the logger isn't active.
816 * Do not send a stop fwlog command if the fw is hung or if
817 * dbgpins are used (due to some fw bug).
818 */
819 if (wl1271_ps_elp_wakeup(wl))
820 goto out;
821 if (!wl->watchdog_recovery &&
822 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
823 wl12xx_cmd_stop_fwlog(wl);
824
825 /* Read the first memory block address */
826 ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
827 if (ret < 0)
828 goto out;
829
830 addr = le32_to_cpu(wl->fw_status_2->log_start_addr);
831 if (!addr)
832 goto out;
833
834 if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
835 offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
836 end_of_log = WLCORE_FW_LOG_END;
837 } else {
838 offset = sizeof(addr);
839 end_of_log = addr;
840 }
841
842 /* Traverse the memory blocks linked list */
843 do {
844 memset(block, 0, WL12XX_HW_BLOCK_SIZE);
845 ret = wlcore_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
846 false);
847 if (ret < 0)
848 goto out;
849
850 /*
851 * Memory blocks are linked to one another. The first 4 bytes
852 * of each memory block hold the hardware address of the next
853 * one. The last memory block points to the first one in
854 * on demand mode and is equal to 0x2000000 in continuous mode.
855 */
856 addr = le32_to_cpup((__le32 *)block);
857 if (!wl12xx_copy_fwlog(wl, block + offset,
858 WL12XX_HW_BLOCK_SIZE - offset))
859 break;
860 } while (addr && (addr != end_of_log));
861
862 wake_up_interruptible(&wl->fwlog_waitq);
863
864 out:
865 kfree(block);
866 }
867
868 static void wlcore_print_recovery(struct wl1271 *wl)
869 {
870 u32 pc = 0;
871 u32 hint_sts = 0;
872 int ret;
873
874 wl1271_info("Hardware recovery in progress. FW ver: %s",
875 wl->chip.fw_ver_str);
876
877 /* change partitions momentarily so we can read the FW pc */
878 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
879 if (ret < 0)
880 return;
881
882 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
883 if (ret < 0)
884 return;
885
886 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
887 if (ret < 0)
888 return;
889
890 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
891 pc, hint_sts, ++wl->recovery_count);
892
893 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
894 }
895
896
897 static void wl1271_recovery_work(struct work_struct *work)
898 {
899 struct wl1271 *wl =
900 container_of(work, struct wl1271, recovery_work);
901 struct wl12xx_vif *wlvif;
902 struct ieee80211_vif *vif;
903
904 mutex_lock(&wl->mutex);
905
906 if (wl->state == WLCORE_STATE_OFF || wl->plt)
907 goto out_unlock;
908
909 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
910 wl12xx_read_fwlog_panic(wl);
911 wlcore_print_recovery(wl);
912 }
913
914 BUG_ON(wl->conf.recovery.bug_on_recovery &&
915 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
916
917 if (wl->conf.recovery.no_recovery) {
918 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
919 goto out_unlock;
920 }
921
922 /*
923 * Advance security sequence number to overcome potential progress
924 * in the firmware during recovery. This doesn't hurt if the network is
925 * not encrypted.
926 */
927 wl12xx_for_each_wlvif(wl, wlvif) {
928 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
929 test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
930 wlvif->tx_security_seq +=
931 WL1271_TX_SQN_POST_RECOVERY_PADDING;
932 }
933
934 /* Prevent spurious TX during FW restart */
935 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
936
937 /* reboot the chipset */
938 while (!list_empty(&wl->wlvif_list)) {
939 wlvif = list_first_entry(&wl->wlvif_list,
940 struct wl12xx_vif, list);
941 vif = wl12xx_wlvif_to_vif(wlvif);
942 __wl1271_op_remove_interface(wl, vif, false);
943 }
944
945 wlcore_op_stop_locked(wl);
946
947 ieee80211_restart_hw(wl->hw);
948
949 /*
950 * It's safe to enable TX now - the queues are stopped after a request
951 * to restart the HW.
952 */
953 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
954
955 out_unlock:
956 wl->watchdog_recovery = false;
957 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
958 mutex_unlock(&wl->mutex);
959 }
960
961 static int wlcore_fw_wakeup(struct wl1271 *wl)
962 {
963 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
964 }
965
966 static int wl1271_setup(struct wl1271 *wl)
967 {
968 wl->fw_status_1 = kmalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
969 sizeof(*wl->fw_status_2) +
970 wl->fw_status_priv_len, GFP_KERNEL);
971 if (!wl->fw_status_1)
972 return -ENOMEM;
973
974 wl->fw_status_2 = (struct wl_fw_status_2 *)
975 (((u8 *) wl->fw_status_1) +
976 WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
977
978 wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
979 if (!wl->tx_res_if) {
980 kfree(wl->fw_status_1);
981 return -ENOMEM;
982 }
983
984 return 0;
985 }
986
987 static int wl12xx_set_power_on(struct wl1271 *wl)
988 {
989 int ret;
990
991 msleep(WL1271_PRE_POWER_ON_SLEEP);
992 ret = wl1271_power_on(wl);
993 if (ret < 0)
994 goto out;
995 msleep(WL1271_POWER_ON_SLEEP);
996 wl1271_io_reset(wl);
997 wl1271_io_init(wl);
998
999 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1000 if (ret < 0)
1001 goto fail;
1002
1003 /* ELP module wake up */
1004 ret = wlcore_fw_wakeup(wl);
1005 if (ret < 0)
1006 goto fail;
1007
1008 out:
1009 return ret;
1010
1011 fail:
1012 wl1271_power_off(wl);
1013 return ret;
1014 }
1015
1016 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1017 {
1018 int ret = 0;
1019
1020 ret = wl12xx_set_power_on(wl);
1021 if (ret < 0)
1022 goto out;
1023
1024 /*
1025 * For wl127x based devices we could use the default block
1026 * size (512 bytes), but due to a bug in the sdio driver, we
1027 * need to set it explicitly after the chip is powered on. To
1028 * simplify the code and since the performance impact is
1029 * negligible, we use the same block size for all different
1030 * chip types.
1031 *
1032 * Check if the bus supports blocksize alignment and, if it
1033 * doesn't, make sure we don't have the quirk.
1034 */
1035 if (!wl1271_set_block_size(wl))
1036 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1037
1038 /* TODO: make sure the lower driver has set things up correctly */
1039
1040 ret = wl1271_setup(wl);
1041 if (ret < 0)
1042 goto out;
1043
1044 ret = wl12xx_fetch_firmware(wl, plt);
1045 if (ret < 0)
1046 goto out;
1047
1048 out:
1049 return ret;
1050 }
1051
1052 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1053 {
1054 int retries = WL1271_BOOT_RETRIES;
1055 struct wiphy *wiphy = wl->hw->wiphy;
1056
1057 static const char* const PLT_MODE[] = {
1058 "PLT_OFF",
1059 "PLT_ON",
1060 "PLT_FEM_DETECT"
1061 };
1062
1063 int ret;
1064
1065 mutex_lock(&wl->mutex);
1066
1067 wl1271_notice("power up");
1068
1069 if (wl->state != WLCORE_STATE_OFF) {
1070 wl1271_error("cannot go into PLT state because not "
1071 "in off state: %d", wl->state);
1072 ret = -EBUSY;
1073 goto out;
1074 }
1075
1076 /* Indicate to lower levels that we are now in PLT mode */
1077 wl->plt = true;
1078 wl->plt_mode = plt_mode;
1079
1080 while (retries) {
1081 retries--;
1082 ret = wl12xx_chip_wakeup(wl, true);
1083 if (ret < 0)
1084 goto power_off;
1085
1086 ret = wl->ops->plt_init(wl);
1087 if (ret < 0)
1088 goto power_off;
1089
1090 wl->state = WLCORE_STATE_ON;
1091 wl1271_notice("firmware booted in PLT mode %s (%s)",
1092 PLT_MODE[plt_mode],
1093 wl->chip.fw_ver_str);
1094
1095 /* update hw/fw version info in wiphy struct */
1096 wiphy->hw_version = wl->chip.id;
1097 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1098 sizeof(wiphy->fw_version));
1099
1100 goto out;
1101
1102 power_off:
1103 wl1271_power_off(wl);
1104 }
1105
1106 wl->plt = false;
1107 wl->plt_mode = PLT_OFF;
1108
1109 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1110 WL1271_BOOT_RETRIES);
1111 out:
1112 mutex_unlock(&wl->mutex);
1113
1114 return ret;
1115 }
1116
1117 int wl1271_plt_stop(struct wl1271 *wl)
1118 {
1119 int ret = 0;
1120
1121 wl1271_notice("power down");
1122
1123 /*
1124 * Interrupts must be disabled before setting the state to OFF.
1125 * Otherwise, the interrupt handler might be called and exit without
1126 * reading the interrupt status.
1127 */
1128 wlcore_disable_interrupts(wl);
1129 mutex_lock(&wl->mutex);
1130 if (!wl->plt) {
1131 mutex_unlock(&wl->mutex);
1132
1133 /*
1134 * This will not necessarily enable interrupts as interrupts
1135 * may have been disabled when op_stop was called. It will,
1136 * however, balance the above call to disable_interrupts().
1137 */
1138 wlcore_enable_interrupts(wl);
1139
1140 wl1271_error("cannot power down because not in PLT "
1141 "state: %d", wl->state);
1142 ret = -EBUSY;
1143 goto out;
1144 }
1145
1146 mutex_unlock(&wl->mutex);
1147
1148 wl1271_flush_deferred_work(wl);
1149 cancel_work_sync(&wl->netstack_work);
1150 cancel_work_sync(&wl->recovery_work);
1151 cancel_delayed_work_sync(&wl->elp_work);
1152 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1153
1154 mutex_lock(&wl->mutex);
1155 wl1271_power_off(wl);
1156 wl->flags = 0;
1157 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1158 wl->state = WLCORE_STATE_OFF;
1159 wl->plt = false;
1160 wl->plt_mode = PLT_OFF;
1161 wl->rx_counter = 0;
1162 mutex_unlock(&wl->mutex);
1163
1164 out:
1165 return ret;
1166 }
1167
1168 static void wl1271_op_tx(struct ieee80211_hw *hw,
1169 struct ieee80211_tx_control *control,
1170 struct sk_buff *skb)
1171 {
1172 struct wl1271 *wl = hw->priv;
1173 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1174 struct ieee80211_vif *vif = info->control.vif;
1175 struct wl12xx_vif *wlvif = NULL;
1176 unsigned long flags;
1177 int q, mapping;
1178 u8 hlid;
1179
1180 if (!vif) {
1181 wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1182 ieee80211_free_txskb(hw, skb);
1183 return;
1184 }
1185
1186 wlvif = wl12xx_vif_to_data(vif);
1187 mapping = skb_get_queue_mapping(skb);
1188 q = wl1271_tx_get_queue(mapping);
1189
1190 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1191
1192 spin_lock_irqsave(&wl->wl_lock, flags);
1193
1194 /*
1195 * drop the packet if the link is invalid or the queue is stopped
1196 * for any reason but watermark. Watermark is a "soft"-stop so we
1197 * allow these packets through.
1198 */
1199 if (hlid == WL12XX_INVALID_LINK_ID ||
1200 (!test_bit(hlid, wlvif->links_map)) ||
1201 (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1202 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1203 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1204 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1205 ieee80211_free_txskb(hw, skb);
1206 goto out;
1207 }
1208
1209 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1210 hlid, q, skb->len);
1211 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1212
1213 wl->tx_queue_count[q]++;
1214 wlvif->tx_queue_count[q]++;
1215
1216 /*
1217 * The workqueue is slow to process the tx_queue and we need to stop
1218 * the queue here, otherwise the queue will get too long.
1219 */
1220 if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1221 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1222 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1223 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1224 wlcore_stop_queue_locked(wl, wlvif, q,
1225 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1226 }
1227
1228 /*
1229 * The chip specific setup must run before the first TX packet -
1230 * before that, the tx_work will not be initialized!
1231 */
1232
1233 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1234 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1235 ieee80211_queue_work(wl->hw, &wl->tx_work);
1236
1237 out:
1238 spin_unlock_irqrestore(&wl->wl_lock, flags);
1239 }
1240
1241 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1242 {
1243 unsigned long flags;
1244 int q;
1245
1246 /* no need to queue a new dummy packet if one is already pending */
1247 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1248 return 0;
1249
1250 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1251
1252 spin_lock_irqsave(&wl->wl_lock, flags);
1253 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1254 wl->tx_queue_count[q]++;
1255 spin_unlock_irqrestore(&wl->wl_lock, flags);
1256
1257 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1258 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1259 return wlcore_tx_work_locked(wl);
1260
1261 /*
1262 * If the FW TX is busy, TX work will be scheduled by the threaded
1263 * interrupt handler function
1264 */
1265 return 0;
1266 }
1267
1268 /*
1269 * The size of the dummy packet should be at least 1400 bytes. However, in
1270 * order to minimize the number of bus transactions, aligning it to 512 bytes
1271 * boundaries could be beneficial, performance wise
1272 */
1273 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
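/* ALIGN(1400, 512) rounds up to the next 512-byte multiple, i.e. 1536 bytes */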
1274
1275 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1276 {
1277 struct sk_buff *skb;
1278 struct ieee80211_hdr_3addr *hdr;
1279 unsigned int dummy_packet_size;
1280
1281 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1282 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1283
1284 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1285 if (!skb) {
1286 wl1271_warning("Failed to allocate a dummy packet skb");
1287 return NULL;
1288 }
1289
1290 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1291
1292 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1293 memset(hdr, 0, sizeof(*hdr));
1294 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1295 IEEE80211_STYPE_NULLFUNC |
1296 IEEE80211_FCTL_TODS);
1297
1298 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1299
1300 /* Dummy packets require the TID to be management */
1301 skb->priority = WL1271_TID_MGMT;
1302
1303 /* Initialize all fields that might be used */
1304 skb_set_queue_mapping(skb, 0);
1305 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1306
1307 return skb;
1308 }
1309
1310
1311 #ifdef CONFIG_PM
1312 static int
1313 wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern *p)
1314 {
1315 int num_fields = 0, in_field = 0, fields_size = 0;
1316 int i, pattern_len = 0;
1317
1318 if (!p->mask) {
1319 wl1271_warning("No mask in WoWLAN pattern");
1320 return -EINVAL;
1321 }
1322
1323 /*
1324 * The pattern is broken up into segments of bytes at different offsets
1325 * that need to be checked by the FW filter. Each segment is called
1326 * a field in the FW API. We verify that the total number of fields
1327 * required for this pattern won't exceed FW limits (8)
1328 * and that the total fields buffer won't exceed the FW limit.
1329 * Note that if there's a pattern which crosses the Ethernet/IP header
1330 * boundary, a new field is required.
1331 */
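/*
 * Example: a run of set mask bits that starts before
 * WL1271_RX_FILTER_ETH_HEADER_SIZE and continues past it is counted as
 * two fields - one matched against the Ethernet header, one against the
 * IP header - so both num_fields and fields_size grow accordingly.
 */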
1332 for (i = 0; i < p->pattern_len; i++) {
1333 if (test_bit(i, (unsigned long *)p->mask)) {
1334 if (!in_field) {
1335 in_field = 1;
1336 pattern_len = 1;
1337 } else {
1338 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1339 num_fields++;
1340 fields_size += pattern_len +
1341 RX_FILTER_FIELD_OVERHEAD;
1342 pattern_len = 1;
1343 } else
1344 pattern_len++;
1345 }
1346 } else {
1347 if (in_field) {
1348 in_field = 0;
1349 fields_size += pattern_len +
1350 RX_FILTER_FIELD_OVERHEAD;
1351 num_fields++;
1352 }
1353 }
1354 }
1355
1356 if (in_field) {
1357 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1358 num_fields++;
1359 }
1360
1361 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1362 wl1271_warning("RX Filter too complex. Too many segments");
1363 return -EINVAL;
1364 }
1365
1366 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1367 wl1271_warning("RX filter pattern is too big");
1368 return -E2BIG;
1369 }
1370
1371 return 0;
1372 }
1373
1374 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1375 {
1376 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1377 }
1378
1379 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1380 {
1381 int i;
1382
1383 if (filter == NULL)
1384 return;
1385
1386 for (i = 0; i < filter->num_fields; i++)
1387 kfree(filter->fields[i].pattern);
1388
1389 kfree(filter);
1390 }
1391
1392 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1393 u16 offset, u8 flags,
1394 u8 *pattern, u8 len)
1395 {
1396 struct wl12xx_rx_filter_field *field;
1397
1398 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1399 wl1271_warning("Max fields per RX filter. can't alloc another");
1400 return -EINVAL;
1401 }
1402
1403 field = &filter->fields[filter->num_fields];
1404
1405 field->pattern = kzalloc(len, GFP_KERNEL);
1406 if (!field->pattern) {
1407 wl1271_warning("Failed to allocate RX filter pattern");
1408 return -ENOMEM;
1409 }
1410
1411 filter->num_fields++;
1412
1413 field->offset = cpu_to_le16(offset);
1414 field->flags = flags;
1415 field->len = len;
1416 memcpy(field->pattern, pattern, len);
1417
1418 return 0;
1419 }
1420
1421 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1422 {
1423 int i, fields_size = 0;
1424
1425 for (i = 0; i < filter->num_fields; i++)
1426 fields_size += filter->fields[i].len +
1427 sizeof(struct wl12xx_rx_filter_field) -
1428 sizeof(u8 *);
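/*
 * sizeof(u8 *) is subtracted because the flattened form (see
 * wl1271_rx_filter_flatten_fields below) replaces the pattern pointer
 * with the pattern bytes themselves, copied inline after the field.
 */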
1429
1430 return fields_size;
1431 }
1432
1433 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1434 u8 *buf)
1435 {
1436 int i;
1437 struct wl12xx_rx_filter_field *field;
1438
1439 for (i = 0; i < filter->num_fields; i++) {
1440 field = (struct wl12xx_rx_filter_field *)buf;
1441
1442 field->offset = filter->fields[i].offset;
1443 field->flags = filter->fields[i].flags;
1444 field->len = filter->fields[i].len;
1445
1446 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1447 buf += sizeof(struct wl12xx_rx_filter_field) -
1448 sizeof(u8 *) + field->len;
1449 }
1450 }
1451
1452 /*
1453 * Allocates an RX filter returned through f
1454 * which needs to be freed using rx_filter_free()
1455 */
1456 static int wl1271_convert_wowlan_pattern_to_rx_filter(
1457 struct cfg80211_wowlan_trig_pkt_pattern *p,
1458 struct wl12xx_rx_filter **f)
1459 {
1460 int i, j, ret = 0;
1461 struct wl12xx_rx_filter *filter;
1462 u16 offset;
1463 u8 flags, len;
1464
1465 filter = wl1271_rx_filter_alloc();
1466 if (!filter) {
1467 wl1271_warning("Failed to alloc rx filter");
1468 ret = -ENOMEM;
1469 goto err;
1470 }
1471
1472 i = 0;
1473 while (i < p->pattern_len) {
1474 if (!test_bit(i, (unsigned long *)p->mask)) {
1475 i++;
1476 continue;
1477 }
1478
1479 for (j = i; j < p->pattern_len; j++) {
1480 if (!test_bit(j, (unsigned long *)p->mask))
1481 break;
1482
1483 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1484 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1485 break;
1486 }
1487
1488 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1489 offset = i;
1490 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1491 } else {
1492 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1493 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1494 }
1495
1496 len = j - i;
1497
1498 ret = wl1271_rx_filter_alloc_field(filter,
1499 offset,
1500 flags,
1501 &p->pattern[i], len);
1502 if (ret)
1503 goto err;
1504
1505 i = j;
1506 }
1507
1508 filter->action = FILTER_SIGNAL;
1509
1510 *f = filter;
1511 return 0;
1512
1513 err:
1514 wl1271_rx_filter_free(filter);
1515 *f = NULL;
1516
1517 return ret;
1518 }
1519
1520 static int wl1271_configure_wowlan(struct wl1271 *wl,
1521 struct cfg80211_wowlan *wow)
1522 {
1523 int i, ret;
1524
1525 if (!wow || wow->any || !wow->n_patterns) {
1526 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1527 FILTER_SIGNAL);
1528 if (ret)
1529 goto out;
1530
1531 ret = wl1271_rx_filter_clear_all(wl);
1532 if (ret)
1533 goto out;
1534
1535 return 0;
1536 }
1537
1538 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1539 return -EINVAL;
1540
1541 /* Validate all incoming patterns before clearing current FW state */
1542 for (i = 0; i < wow->n_patterns; i++) {
1543 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1544 if (ret) {
1545 wl1271_warning("Bad wowlan pattern %d", i);
1546 return ret;
1547 }
1548 }
1549
1550 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1551 if (ret)
1552 goto out;
1553
1554 ret = wl1271_rx_filter_clear_all(wl);
1555 if (ret)
1556 goto out;
1557
1558 /* Translate WoWLAN patterns into filters */
1559 for (i = 0; i < wow->n_patterns; i++) {
1560 struct cfg80211_wowlan_trig_pkt_pattern *p;
1561 struct wl12xx_rx_filter *filter = NULL;
1562
1563 p = &wow->patterns[i];
1564
1565 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1566 if (ret) {
1567 wl1271_warning("Failed to create an RX filter from "
1568 "wowlan pattern %d", i);
1569 goto out;
1570 }
1571
1572 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1573
1574 wl1271_rx_filter_free(filter);
1575 if (ret)
1576 goto out;
1577 }
1578
1579 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1580
1581 out:
1582 return ret;
1583 }
1584
1585 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1586 struct wl12xx_vif *wlvif,
1587 struct cfg80211_wowlan *wow)
1588 {
1589 int ret = 0;
1590
1591 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1592 goto out;
1593
1594 ret = wl1271_ps_elp_wakeup(wl);
1595 if (ret < 0)
1596 goto out;
1597
1598 ret = wl1271_configure_wowlan(wl, wow);
1599 if (ret < 0)
1600 goto out_sleep;
1601
1602 if ((wl->conf.conn.suspend_wake_up_event ==
1603 wl->conf.conn.wake_up_event) &&
1604 (wl->conf.conn.suspend_listen_interval ==
1605 wl->conf.conn.listen_interval))
1606 goto out_sleep;
1607
1608 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1609 wl->conf.conn.suspend_wake_up_event,
1610 wl->conf.conn.suspend_listen_interval);
1611
1612 if (ret < 0)
1613 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1614
1615 out_sleep:
1616 wl1271_ps_elp_sleep(wl);
1617 out:
1618 return ret;
1619
1620 }
1621
1622 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1623 struct wl12xx_vif *wlvif)
1624 {
1625 int ret = 0;
1626
1627 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1628 goto out;
1629
1630 ret = wl1271_ps_elp_wakeup(wl);
1631 if (ret < 0)
1632 goto out;
1633
1634 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1635
1636 wl1271_ps_elp_sleep(wl);
1637 out:
1638 return ret;
1639
1640 }
1641
1642 static int wl1271_configure_suspend(struct wl1271 *wl,
1643 struct wl12xx_vif *wlvif,
1644 struct cfg80211_wowlan *wow)
1645 {
1646 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1647 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1648 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1649 return wl1271_configure_suspend_ap(wl, wlvif);
1650 return 0;
1651 }
1652
1653 static void wl1271_configure_resume(struct wl1271 *wl,
1654 struct wl12xx_vif *wlvif)
1655 {
1656 int ret = 0;
1657 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1658 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1659
1660 if ((!is_ap) && (!is_sta))
1661 return;
1662
1663 if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1664 return;
1665
1666 ret = wl1271_ps_elp_wakeup(wl);
1667 if (ret < 0)
1668 return;
1669
1670 if (is_sta) {
1671 wl1271_configure_wowlan(wl, NULL);
1672
1673 if ((wl->conf.conn.suspend_wake_up_event ==
1674 wl->conf.conn.wake_up_event) &&
1675 (wl->conf.conn.suspend_listen_interval ==
1676 wl->conf.conn.listen_interval))
1677 goto out_sleep;
1678
1679 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1680 wl->conf.conn.wake_up_event,
1681 wl->conf.conn.listen_interval);
1682
1683 if (ret < 0)
1684 wl1271_error("resume: wake up conditions failed: %d",
1685 ret);
1686
1687 } else if (is_ap) {
1688 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1689 }
1690
1691 out_sleep:
1692 wl1271_ps_elp_sleep(wl);
1693 }
1694
1695 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1696 struct cfg80211_wowlan *wow)
1697 {
1698 struct wl1271 *wl = hw->priv;
1699 struct wl12xx_vif *wlvif;
1700 int ret;
1701
1702 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1703 WARN_ON(!wow);
1704
1705 /* we want to perform the recovery before suspending */
1706 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1707 wl1271_warning("postponing suspend to perform recovery");
1708 return -EBUSY;
1709 }
1710
1711 wl1271_tx_flush(wl);
1712
1713 mutex_lock(&wl->mutex);
1714 wl->wow_enabled = true;
1715 wl12xx_for_each_wlvif(wl, wlvif) {
1716 ret = wl1271_configure_suspend(wl, wlvif, wow);
1717 if (ret < 0) {
1718 mutex_unlock(&wl->mutex);
1719 wl1271_warning("couldn't prepare device to suspend");
1720 return ret;
1721 }
1722 }
1723 mutex_unlock(&wl->mutex);
1724 /* flush any remaining work */
1725 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1726
1727 /*
1728 * disable and re-enable interrupts in order to flush
1729 * the threaded_irq
1730 */
1731 wlcore_disable_interrupts(wl);
1732
1733 /*
1734 * set suspended flag to avoid triggering a new threaded_irq
1735 * work. no need for spinlock as interrupts are disabled.
1736 */
1737 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1738
1739 wlcore_enable_interrupts(wl);
1740 flush_work(&wl->tx_work);
1741 flush_delayed_work(&wl->elp_work);
1742
1743 return 0;
1744 }
1745
1746 static int wl1271_op_resume(struct ieee80211_hw *hw)
1747 {
1748 struct wl1271 *wl = hw->priv;
1749 struct wl12xx_vif *wlvif;
1750 unsigned long flags;
1751 bool run_irq_work = false, pending_recovery;
1752 int ret;
1753
1754 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1755 wl->wow_enabled);
1756 WARN_ON(!wl->wow_enabled);
1757
1758 /*
1759 * re-enable irq_work enqueuing, and call irq_work directly if
1760 * there is a pending work.
1761 */
1762 spin_lock_irqsave(&wl->wl_lock, flags);
1763 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1764 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1765 run_irq_work = true;
1766 spin_unlock_irqrestore(&wl->wl_lock, flags);
1767
1768 mutex_lock(&wl->mutex);
1769
1770 /* test the recovery flag before calling any SDIO functions */
1771 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1772 &wl->flags);
1773
1774 if (run_irq_work) {
1775 wl1271_debug(DEBUG_MAC80211,
1776 "run postponed irq_work directly");
1777
1778 /* don't talk to the HW if recovery is pending */
1779 if (!pending_recovery) {
1780 ret = wlcore_irq_locked(wl);
1781 if (ret)
1782 wl12xx_queue_recovery_work(wl);
1783 }
1784
1785 wlcore_enable_interrupts(wl);
1786 }
1787
1788 if (pending_recovery) {
1789 wl1271_warning("queuing forgotten recovery on resume");
1790 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1791 goto out;
1792 }
1793
1794 wl12xx_for_each_wlvif(wl, wlvif) {
1795 wl1271_configure_resume(wl, wlvif);
1796 }
1797
1798 out:
1799 wl->wow_enabled = false;
1800 mutex_unlock(&wl->mutex);
1801
1802 return 0;
1803 }
1804 #endif
1805
1806 static int wl1271_op_start(struct ieee80211_hw *hw)
1807 {
1808 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1809
1810 /*
1811 * We have to delay the booting of the hardware because
1812 * we need to know the local MAC address before downloading and
1813 * initializing the firmware. The MAC address cannot be changed
1814 * after boot, and without the proper MAC address, the firmware
1815 * will not function properly.
1816 *
1817 * The MAC address is first known when the corresponding interface
1818 * is added. That is where we will initialize the hardware.
1819 */
1820
1821 return 0;
1822 }
1823
1824 static void wlcore_op_stop_locked(struct wl1271 *wl)
1825 {
1826 int i;
1827
1828 if (wl->state == WLCORE_STATE_OFF) {
1829 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1830 &wl->flags))
1831 wlcore_enable_interrupts(wl);
1832
1833 return;
1834 }
1835
1836 /*
1837 * this must be before the cancel_work calls below, so that the work
1838 * functions don't perform further work.
1839 */
1840 wl->state = WLCORE_STATE_OFF;
1841
1842 /*
1843 * Use the nosync variant to disable interrupts, so the mutex could be
1844 * held while doing so without deadlocking.
1845 */
1846 wlcore_disable_interrupts_nosync(wl);
1847
1848 mutex_unlock(&wl->mutex);
1849
1850 wlcore_synchronize_interrupts(wl);
1851 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1852 cancel_work_sync(&wl->recovery_work);
1853 wl1271_flush_deferred_work(wl);
1854 cancel_delayed_work_sync(&wl->scan_complete_work);
1855 cancel_work_sync(&wl->netstack_work);
1856 cancel_work_sync(&wl->tx_work);
1857 cancel_delayed_work_sync(&wl->elp_work);
1858 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1859
1860 /* let's notify MAC80211 about the remaining pending TX frames */
1861 mutex_lock(&wl->mutex);
1862 wl12xx_tx_reset(wl);
1863
1864 wl1271_power_off(wl);
1865 /*
1866 * In case a recovery was scheduled, interrupts were disabled to avoid
1867 * an interrupt storm. Now that the power is down, it is safe to
1868 * re-enable interrupts to balance the disable depth
1869 */
1870 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1871 wlcore_enable_interrupts(wl);
1872
1873 wl->band = IEEE80211_BAND_2GHZ;
1874
1875 wl->rx_counter = 0;
1876 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1877 wl->channel_type = NL80211_CHAN_NO_HT;
1878 wl->tx_blocks_available = 0;
1879 wl->tx_allocated_blocks = 0;
1880 wl->tx_results_count = 0;
1881 wl->tx_packets_count = 0;
1882 wl->time_offset = 0;
1883 wl->ap_fw_ps_map = 0;
1884 wl->ap_ps_map = 0;
1885 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1886 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1887 memset(wl->links_map, 0, sizeof(wl->links_map));
1888 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1889 memset(wl->session_ids, 0, sizeof(wl->session_ids));
1890 wl->active_sta_count = 0;
1891 wl->active_link_count = 0;
1892
1893 /* The system link is always allocated */
1894 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1895 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1896 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1897
1898 /*
1899 * this is performed after the cancel_work calls and the associated
1900 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1901 * get executed before all these vars have been reset.
1902 */
1903 wl->flags = 0;
1904
1905 wl->tx_blocks_freed = 0;
1906
1907 for (i = 0; i < NUM_TX_QUEUES; i++) {
1908 wl->tx_pkts_freed[i] = 0;
1909 wl->tx_allocated_pkts[i] = 0;
1910 }
1911
1912 wl1271_debugfs_reset(wl);
1913
1914 kfree(wl->fw_status_1);
1915 wl->fw_status_1 = NULL;
1916 wl->fw_status_2 = NULL;
1917 kfree(wl->tx_res_if);
1918 wl->tx_res_if = NULL;
1919 kfree(wl->target_mem_map);
1920 wl->target_mem_map = NULL;
1921
1922 /*
1923 * FW channels must be re-calibrated after recovery,
1924 * clear the last Reg-Domain channel configuration.
1925 */
1926 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
1927 }
1928
1929 static void wlcore_op_stop(struct ieee80211_hw *hw)
1930 {
1931 struct wl1271 *wl = hw->priv;
1932
1933 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
1934
1935 mutex_lock(&wl->mutex);
1936
1937 wlcore_op_stop_locked(wl);
1938
1939 mutex_unlock(&wl->mutex);
1940 }
1941
1942 static void wlcore_channel_switch_work(struct work_struct *work)
1943 {
1944 struct delayed_work *dwork;
1945 struct wl1271 *wl;
1946 struct ieee80211_vif *vif;
1947 struct wl12xx_vif *wlvif;
1948 int ret;
1949
1950 dwork = container_of(work, struct delayed_work, work);
1951 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
1952 wl = wlvif->wl;
1953
1954 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
1955
1956 mutex_lock(&wl->mutex);
1957
1958 if (unlikely(wl->state != WLCORE_STATE_ON))
1959 goto out;
1960
1961 /* check the channel switch is still ongoing */
1962 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
1963 goto out;
1964
1965 vif = wl12xx_wlvif_to_vif(wlvif);
1966 ieee80211_chswitch_done(vif, false);
1967
1968 ret = wl1271_ps_elp_wakeup(wl);
1969 if (ret < 0)
1970 goto out;
1971
1972 wl12xx_cmd_stop_channel_switch(wl, wlvif);
1973
1974 wl1271_ps_elp_sleep(wl);
1975 out:
1976 mutex_unlock(&wl->mutex);
1977 }
1978
1979 static void wlcore_connection_loss_work(struct work_struct *work)
1980 {
1981 struct delayed_work *dwork;
1982 struct wl1271 *wl;
1983 struct ieee80211_vif *vif;
1984 struct wl12xx_vif *wlvif;
1985
1986 dwork = container_of(work, struct delayed_work, work);
1987 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
1988 wl = wlvif->wl;
1989
1990 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
1991
1992 mutex_lock(&wl->mutex);
1993
1994 if (unlikely(wl->state != WLCORE_STATE_ON))
1995 goto out;
1996
1997 /* Call mac80211 connection loss */
1998 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1999 goto out;
2000
2001 vif = wl12xx_wlvif_to_vif(wlvif);
2002 ieee80211_connection_loss(vif);
2003 out:
2004 mutex_unlock(&wl->mutex);
2005 }
2006
2007 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2008 {
2009 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2010 WL12XX_MAX_RATE_POLICIES);
2011 if (policy >= WL12XX_MAX_RATE_POLICIES)
2012 return -EBUSY;
2013
2014 __set_bit(policy, wl->rate_policies_map);
2015 *idx = policy;
2016 return 0;
2017 }
2018
2019 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2020 {
2021 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2022 return;
2023
2024 __clear_bit(*idx, wl->rate_policies_map);
2025 *idx = WL12XX_MAX_RATE_POLICIES;
2026 }
2027
2028 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2029 {
2030 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2031 WLCORE_MAX_KLV_TEMPLATES);
2032 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2033 return -EBUSY;
2034
2035 __set_bit(policy, wl->klv_templates_map);
2036 *idx = policy;
2037 return 0;
2038 }
2039
2040 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2041 {
2042 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2043 return;
2044
2045 __clear_bit(*idx, wl->klv_templates_map);
2046 *idx = WLCORE_MAX_KLV_TEMPLATES;
2047 }
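/*
 * Note on the two small allocators above (rate policies and klv
 * templates): both hand out indices from a driver-private bitmap via
 * find_first_zero_bit()/__set_bit() and return -EBUSY once the map is
 * full.  Freeing clears the bit and poisons the caller's index with the
 * out-of-range MAX value, so a stale index trips the WARN_ON() on the
 * next free instead of silently corrupting the map.  The indices are
 * consumed per-vif in wl12xx_init_vif_data() below and released in
 * __wl1271_op_remove_interface().
 */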
2048
2049 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2050 {
2051 switch (wlvif->bss_type) {
2052 case BSS_TYPE_AP_BSS:
2053 if (wlvif->p2p)
2054 return WL1271_ROLE_P2P_GO;
2055 else
2056 return WL1271_ROLE_AP;
2057
2058 case BSS_TYPE_STA_BSS:
2059 if (wlvif->p2p)
2060 return WL1271_ROLE_P2P_CL;
2061 else
2062 return WL1271_ROLE_STA;
2063
2064 case BSS_TYPE_IBSS:
2065 return WL1271_ROLE_IBSS;
2066
2067 default:
2068 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2069 }
2070 return WL12XX_INVALID_ROLE_TYPE;
2071 }
2072
2073 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2074 {
2075 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2076 int i;
2077
2078 /* clear everything but the persistent data */
2079 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2080
2081 switch (ieee80211_vif_type_p2p(vif)) {
2082 case NL80211_IFTYPE_P2P_CLIENT:
2083 wlvif->p2p = 1;
2084 /* fall-through */
2085 case NL80211_IFTYPE_STATION:
2086 wlvif->bss_type = BSS_TYPE_STA_BSS;
2087 break;
2088 case NL80211_IFTYPE_ADHOC:
2089 wlvif->bss_type = BSS_TYPE_IBSS;
2090 break;
2091 case NL80211_IFTYPE_P2P_GO:
2092 wlvif->p2p = 1;
2093 /* fall-through */
2094 case NL80211_IFTYPE_AP:
2095 wlvif->bss_type = BSS_TYPE_AP_BSS;
2096 break;
2097 default:
2098 wlvif->bss_type = MAX_BSS_TYPE;
2099 return -EOPNOTSUPP;
2100 }
2101
2102 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2103 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2104 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2105
2106 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2107 wlvif->bss_type == BSS_TYPE_IBSS) {
2108 /* init sta/ibss data */
2109 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2110 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2111 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2112 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2113 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2114 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2115 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2116 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2117 } else {
2118 /* init ap data */
2119 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2120 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2121 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2122 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2123 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2124 wl12xx_allocate_rate_policy(wl,
2125 &wlvif->ap.ucast_rate_idx[i]);
2126 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2127 /*
2128 * TODO: check if basic_rate shouldn't be
2129 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2130 * instead (the same thing for STA above).
2131 */
2132 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2133 /* TODO: this seems to be used only for STA, check it */
2134 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2135 }
2136
2137 wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2138 wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2139 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2140
2141 /*
2142 * mac80211 configures some values globally, while we treat them
2143 * per-interface. thus, on init, we have to copy them from wl
2144 */
2145 wlvif->band = wl->band;
2146 wlvif->channel = wl->channel;
2147 wlvif->power_level = wl->power_level;
2148 wlvif->channel_type = wl->channel_type;
2149
2150 INIT_WORK(&wlvif->rx_streaming_enable_work,
2151 wl1271_rx_streaming_enable_work);
2152 INIT_WORK(&wlvif->rx_streaming_disable_work,
2153 wl1271_rx_streaming_disable_work);
2154 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2155 wlcore_channel_switch_work);
2156 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2157 wlcore_connection_loss_work);
2158 INIT_LIST_HEAD(&wlvif->list);
2159
2160 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2161 (unsigned long) wlvif);
2162 return 0;
2163 }
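/*
 * A note on the memset() at the top of wl12xx_init_vif_data(): only the
 * fields laid out before the 'persistent' marker member of struct
 * wl12xx_vif are cleared, so anything placed at or after that member
 * survives an interface being removed and re-added (or recovered).
 * The band/channel/power_level/channel_type copies exist because
 * mac80211 tracks those values globally while the firmware wants them
 * per-role.
 */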
2164
2165 static bool wl12xx_init_fw(struct wl1271 *wl)
2166 {
2167 int retries = WL1271_BOOT_RETRIES;
2168 bool booted = false;
2169 struct wiphy *wiphy = wl->hw->wiphy;
2170 int ret;
2171
2172 while (retries) {
2173 retries--;
2174 ret = wl12xx_chip_wakeup(wl, false);
2175 if (ret < 0)
2176 goto power_off;
2177
2178 ret = wl->ops->boot(wl);
2179 if (ret < 0)
2180 goto power_off;
2181
2182 ret = wl1271_hw_init(wl);
2183 if (ret < 0)
2184 goto irq_disable;
2185
2186 booted = true;
2187 break;
2188
2189 irq_disable:
2190 mutex_unlock(&wl->mutex);
2191 /* Unlocking the mutex in the middle of handling is
2192 inherently unsafe. In this case we deem it safe to do,
2193 because we need to let any possibly pending IRQ out of
2194 the system (and while we are WLCORE_STATE_OFF the IRQ
2195 work function will not do anything.) Also, any other
2196 possible concurrent operations will fail due to the
2197 current state, hence the wl1271 struct should be safe. */
2198 wlcore_disable_interrupts(wl);
2199 wl1271_flush_deferred_work(wl);
2200 cancel_work_sync(&wl->netstack_work);
2201 mutex_lock(&wl->mutex);
2202 power_off:
2203 wl1271_power_off(wl);
2204 }
2205
2206 if (!booted) {
2207 wl1271_error("firmware boot failed despite %d retries",
2208 WL1271_BOOT_RETRIES);
2209 goto out;
2210 }
2211
2212 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2213
2214 /* update hw/fw version info in wiphy struct */
2215 wiphy->hw_version = wl->chip.id;
2216 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2217 sizeof(wiphy->fw_version));
2218
2219 /*
2220 * Now we know if 11a is supported (info from the NVS), so disable
2221 * 11a channels if not supported
2222 */
2223 if (!wl->enable_11a)
2224 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2225
2226 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2227 wl->enable_11a ? "" : "not ");
2228
2229 wl->state = WLCORE_STATE_ON;
2230 out:
2231 return booted;
2232 }
2233
2234 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2235 {
2236 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2237 }
2238
2239 /*
2240 * Check whether a fw switch (i.e. moving from one loaded
2241 * fw to another) is needed. This function is also responsible
2242 * for updating wl->last_vif_count, so it must be called before
2243 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2244 * will be used).
2245 */
2246 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2247 struct vif_counter_data vif_counter_data,
2248 bool add)
2249 {
2250 enum wl12xx_fw_type current_fw = wl->fw_type;
2251 u8 vif_count = vif_counter_data.counter;
2252
2253 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2254 return false;
2255
2256 /* increase the vif count if this is a new vif */
2257 if (add && !vif_counter_data.cur_vif_running)
2258 vif_count++;
2259
2260 wl->last_vif_count = vif_count;
2261
2262 /* no need for fw change if the device is OFF */
2263 if (wl->state == WLCORE_STATE_OFF)
2264 return false;
2265
2266 /* no need for fw change if a single fw is used */
2267 if (!wl->mr_fw_name)
2268 return false;
2269
2270 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2271 return true;
2272 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2273 return true;
2274
2275 return false;
2276 }
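/*
 * In short, wl12xx_need_fw_change() requests a firmware swap only when
 * all of the following hold: no vif change is already in progress, the
 * device is ON, a separate multi-role image exists (wl->mr_fw_name is
 * set), and the updated vif count crosses the single/multi boundary in
 * the direction opposite to the image currently loaded (more than one
 * vif on the single-role fw, or at most one vif on the multi-role fw).
 * Except when a vif change is in progress, wl->last_vif_count is
 * refreshed even if no swap is needed, so the next firmware load still
 * picks the right image.
 */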
2277
2278 /*
2279 * Enter "forced psm". Make sure the sta is in psm against the ap,
2280 * so the connection is more likely to survive the fw switch.
2281 */
2282 static void wl12xx_force_active_psm(struct wl1271 *wl)
2283 {
2284 struct wl12xx_vif *wlvif;
2285
2286 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2287 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2288 }
2289 }
2290
2291 struct wlcore_hw_queue_iter_data {
2292 unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2293 /* current vif */
2294 struct ieee80211_vif *vif;
2295 /* is the current vif among those iterated */
2296 bool cur_running;
2297 };
2298
2299 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2300 struct ieee80211_vif *vif)
2301 {
2302 struct wlcore_hw_queue_iter_data *iter_data = data;
2303
2304 if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2305 return;
2306
2307 if (iter_data->cur_running || vif == iter_data->vif) {
2308 iter_data->cur_running = true;
2309 return;
2310 }
2311
2312 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2313 }
2314
2315 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2316 struct wl12xx_vif *wlvif)
2317 {
2318 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2319 struct wlcore_hw_queue_iter_data iter_data = {};
2320 int i, q_base;
2321
2322 iter_data.vif = vif;
2323
2324 /* mark all bits taken by active interfaces */
2325 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2326 IEEE80211_IFACE_ITER_RESUME_ALL,
2327 wlcore_hw_queue_iter, &iter_data);
2328
2329 /* the current vif is already running in mac80211 (resume/recovery) */
2330 if (iter_data.cur_running) {
2331 wlvif->hw_queue_base = vif->hw_queue[0];
2332 wl1271_debug(DEBUG_MAC80211,
2333 "using pre-allocated hw queue base %d",
2334 wlvif->hw_queue_base);
2335
2336 /* the interface type might have changed */
2337 goto adjust_cab_queue;
2338 }
2339
2340 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2341 WLCORE_NUM_MAC_ADDRESSES);
2342 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2343 return -EBUSY;
2344
2345 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2346 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2347 wlvif->hw_queue_base);
2348
2349 for (i = 0; i < NUM_TX_QUEUES; i++) {
2350 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2351 /* register hw queues in mac80211 */
2352 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2353 }
2354
2355 adjust_cab_queue:
2356 /* the last places are reserved for cab queues per interface */
2357 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2358 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2359 wlvif->hw_queue_base / NUM_TX_QUEUES;
2360 else
2361 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2362
2363 return 0;
2364 }
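/*
 * Illustration of the hw-queue layout built above (assuming the usual
 * values NUM_TX_QUEUES == 4 and WLCORE_NUM_MAC_ADDRESSES == 3): every
 * interface owns one slot of NUM_TX_QUEUES consecutive mac80211 queues,
 * hw_queue_base = slot * NUM_TX_QUEUES, and the per-interface cab
 * (content after beacon) queues are packed after all the regular
 * blocks.  So the vif in slot 1 would use queues 4..7 and, if it is an
 * AP, cab queue 4 * 3 + 1 == 13.
 */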
2365
2366 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2367 struct ieee80211_vif *vif)
2368 {
2369 struct wl1271 *wl = hw->priv;
2370 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2371 struct vif_counter_data vif_count;
2372 int ret = 0;
2373 u8 role_type;
2374 bool booted = false;
2375
2376 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2377 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2378
2379 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2380 ieee80211_vif_type_p2p(vif), vif->addr);
2381
2382 wl12xx_get_vif_count(hw, vif, &vif_count);
2383
2384 mutex_lock(&wl->mutex);
2385 ret = wl1271_ps_elp_wakeup(wl);
2386 if (ret < 0)
2387 goto out_unlock;
2388
2389 /*
2390 * in some rare HW recovery corner cases it's possible to
2391 * get here before __wl1271_op_remove_interface is complete, so
2392 * opt out if that is the case.
2393 */
2394 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2395 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2396 ret = -EBUSY;
2397 goto out;
2398 }
2399
2400
2401 ret = wl12xx_init_vif_data(wl, vif);
2402 if (ret < 0)
2403 goto out;
2404
2405 wlvif->wl = wl;
2406 role_type = wl12xx_get_role_type(wl, wlvif);
2407 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2408 ret = -EINVAL;
2409 goto out;
2410 }
2411
2412 ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2413 if (ret < 0)
2414 goto out;
2415
2416 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2417 wl12xx_force_active_psm(wl);
2418 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2419 mutex_unlock(&wl->mutex);
2420 wl1271_recovery_work(&wl->recovery_work);
2421 return 0;
2422 }
2423
2424 /*
2425 * TODO: after the nvs issue will be solved, move this block
2426 * to start(), and make sure here the driver is ON.
2427 */
2428 if (wl->state == WLCORE_STATE_OFF) {
2429 /*
2430 * we still need this in order to configure the fw
2431 * while uploading the nvs
2432 */
2433 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2434
2435 booted = wl12xx_init_fw(wl);
2436 if (!booted) {
2437 ret = -EINVAL;
2438 goto out;
2439 }
2440 }
2441
2442 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2443 role_type, &wlvif->role_id);
2444 if (ret < 0)
2445 goto out;
2446
2447 ret = wl1271_init_vif_specific(wl, vif);
2448 if (ret < 0)
2449 goto out;
2450
2451 list_add(&wlvif->list, &wl->wlvif_list);
2452 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2453
2454 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2455 wl->ap_count++;
2456 else
2457 wl->sta_count++;
2458 out:
2459 wl1271_ps_elp_sleep(wl);
2460 out_unlock:
2461 mutex_unlock(&wl->mutex);
2462
2463 return ret;
2464 }
2465
2466 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2467 struct ieee80211_vif *vif,
2468 bool reset_tx_queues)
2469 {
2470 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2471 int i, ret;
2472 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2473
2474 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2475
2476 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2477 return;
2478
2479 /* because of hardware recovery, we may get here twice */
2480 if (wl->state == WLCORE_STATE_OFF)
2481 return;
2482
2483 wl1271_info("down");
2484
2485 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2486 wl->scan_wlvif == wlvif) {
2487 /*
2488 * Rearm the tx watchdog just before idling scan. This
2489 * prevents just-finished scans from triggering the watchdog
2490 */
2491 wl12xx_rearm_tx_watchdog_locked(wl);
2492
2493 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2494 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2495 wl->scan_wlvif = NULL;
2496 wl->scan.req = NULL;
2497 ieee80211_scan_completed(wl->hw, true);
2498 }
2499
2500 if (wl->sched_vif == wlvif) {
2501 ieee80211_sched_scan_stopped(wl->hw);
2502 wl->sched_vif = NULL;
2503 }
2504
2505 if (wl->roc_vif == vif) {
2506 wl->roc_vif = NULL;
2507 ieee80211_remain_on_channel_expired(wl->hw);
2508 }
2509
2510 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2511 /* disable active roles */
2512 ret = wl1271_ps_elp_wakeup(wl);
2513 if (ret < 0)
2514 goto deinit;
2515
2516 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2517 wlvif->bss_type == BSS_TYPE_IBSS) {
2518 if (wl12xx_dev_role_started(wlvif))
2519 wl12xx_stop_dev(wl, wlvif);
2520 }
2521
2522 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2523 if (ret < 0)
2524 goto deinit;
2525
2526 wl1271_ps_elp_sleep(wl);
2527 }
2528 deinit:
2529 /* clear all hlids (except system_hlid) */
2530 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2531
2532 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2533 wlvif->bss_type == BSS_TYPE_IBSS) {
2534 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2535 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2536 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2537 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2538 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2539 } else {
2540 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2541 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2542 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2543 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2544 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2545 wl12xx_free_rate_policy(wl,
2546 &wlvif->ap.ucast_rate_idx[i]);
2547 wl1271_free_ap_keys(wl, wlvif);
2548 }
2549
2550 dev_kfree_skb(wlvif->probereq);
2551 wlvif->probereq = NULL;
2552 wl12xx_tx_reset_wlvif(wl, wlvif);
2553 if (wl->last_wlvif == wlvif)
2554 wl->last_wlvif = NULL;
2555 list_del(&wlvif->list);
2556 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2557 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2558 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2559
2560 if (is_ap)
2561 wl->ap_count--;
2562 else
2563 wl->sta_count--;
2564
2565 /*
2566 * Last AP is gone, but stations remain. Configure sleep auth according to STA.
2567 * Don't do this on unintended recovery.
2568 */
2569 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2570 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2571 goto unlock;
2572
2573 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2574 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2575 /* Configure for power according to debugfs */
2576 if (sta_auth != WL1271_PSM_ILLEGAL)
2577 wl1271_acx_sleep_auth(wl, sta_auth);
2578 /* Configure for ELP power saving */
2579 else
2580 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2581 }
2582
2583 unlock:
2584 mutex_unlock(&wl->mutex);
2585
2586 del_timer_sync(&wlvif->rx_streaming_timer);
2587 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2588 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2589 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2590
2591 mutex_lock(&wl->mutex);
2592 }
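/*
 * The unlock/cancel/relock dance at the end of
 * __wl1271_op_remove_interface() is deliberate: the rx-streaming works
 * and connection_loss_work take wl->mutex themselves (see
 * wlcore_connection_loss_work() above), so cancelling them
 * synchronously while still holding the mutex would deadlock.  Nothing
 * needs to be re-checked afterwards because the vif has already been
 * unlinked from wl->wlvif_list at this point.
 */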
2593
2594 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2595 struct ieee80211_vif *vif)
2596 {
2597 struct wl1271 *wl = hw->priv;
2598 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2599 struct wl12xx_vif *iter;
2600 struct vif_counter_data vif_count;
2601
2602 wl12xx_get_vif_count(hw, vif, &vif_count);
2603 mutex_lock(&wl->mutex);
2604
2605 if (wl->state == WLCORE_STATE_OFF ||
2606 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2607 goto out;
2608
2609 /*
2610 * wl->vif can be null here if someone shuts down the interface
2611 * just when hardware recovery has been started.
2612 */
2613 wl12xx_for_each_wlvif(wl, iter) {
2614 if (iter != wlvif)
2615 continue;
2616
2617 __wl1271_op_remove_interface(wl, vif, true);
2618 break;
2619 }
2620 WARN_ON(iter != wlvif);
2621 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2622 wl12xx_force_active_psm(wl);
2623 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2624 wl12xx_queue_recovery_work(wl);
2625 }
2626 out:
2627 mutex_unlock(&wl->mutex);
2628 }
2629
2630 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2631 struct ieee80211_vif *vif,
2632 enum nl80211_iftype new_type, bool p2p)
2633 {
2634 struct wl1271 *wl = hw->priv;
2635 int ret;
2636
2637 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2638 wl1271_op_remove_interface(hw, vif);
2639
2640 vif->type = new_type;
2641 vif->p2p = p2p;
2642 ret = wl1271_op_add_interface(hw, vif);
2643
2644 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2645 return ret;
2646 }
2647
2648 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2649 {
2650 int ret;
2651 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2652
2653 /*
2654 * One of the side effects of the JOIN command is that it clears
2655 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2656 * to a WPA/WPA2 access point will therefore kill the data-path.
2657 * Currently the only valid scenario for JOIN during association
2658 * is on roaming, in which case we will also be given new keys.
2659 * Keep the below message for now, unless it starts bothering
2660 * users who really like to roam a lot :)
2661 */
2662 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2663 wl1271_info("JOIN while associated.");
2664
2665 /* clear encryption type */
2666 wlvif->encryption_type = KEY_NONE;
2667
2668 if (is_ibss)
2669 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2670 else {
2671 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2672 /*
2673 * TODO: this is an ugly workaround for wl12xx fw
2674 * bug - we are not able to tx/rx after the first
2675 * start_sta, so make dummy start+stop calls,
2676 * and then call start_sta again.
2677 * this should be fixed in the fw.
2678 */
2679 wl12xx_cmd_role_start_sta(wl, wlvif);
2680 wl12xx_cmd_role_stop_sta(wl, wlvif);
2681 }
2682
2683 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2684 }
2685
2686 return ret;
2687 }
2688
2689 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2690 int offset)
2691 {
2692 u8 ssid_len;
2693 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2694 skb->len - offset);
2695
2696 if (!ptr) {
2697 wl1271_error("No SSID in IEs!");
2698 return -ENOENT;
2699 }
2700
2701 ssid_len = ptr[1];
2702 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2703 wl1271_error("SSID is too long!");
2704 return -EINVAL;
2705 }
2706
2707 wlvif->ssid_len = ssid_len;
2708 memcpy(wlvif->ssid, ptr+2, ssid_len);
2709 return 0;
2710 }
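/*
 * Reminder on the IE layout parsed above: each 802.11 information
 * element is [element id][length][payload], so ptr[1] is the SSID
 * length and ptr + 2 points at the SSID bytes.  For example, the SSID
 * element of a (hypothetical) network "wlan" would be the bytes
 * 00 04 'w' 'l' 'a' 'n'.
 */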
2711
2712 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2713 {
2714 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2715 struct sk_buff *skb;
2716 int ieoffset;
2717
2718 /* we currently only support setting the ssid from the ap probe req */
2719 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2720 return -EINVAL;
2721
2722 skb = ieee80211_ap_probereq_get(wl->hw, vif);
2723 if (!skb)
2724 return -EINVAL;
2725
2726 ieoffset = offsetof(struct ieee80211_mgmt,
2727 u.probe_req.variable);
2728 wl1271_ssid_set(wlvif, skb, ieoffset);
2729 dev_kfree_skb(skb);
2730
2731 return 0;
2732 }
2733
2734 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2735 struct ieee80211_bss_conf *bss_conf,
2736 u32 sta_rate_set)
2737 {
2738 int ieoffset;
2739 int ret;
2740
2741 wlvif->aid = bss_conf->aid;
2742 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2743 wlvif->beacon_int = bss_conf->beacon_int;
2744 wlvif->wmm_enabled = bss_conf->qos;
2745
2746 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2747
2748 /*
2749 * with wl1271, we don't need to update the
2750 * beacon_int and dtim_period, because the firmware
2751 * updates it by itself when the first beacon is
2752 * received after a join.
2753 */
2754 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2755 if (ret < 0)
2756 return ret;
2757
2758 /*
2759 * Get a template for hardware connection maintenance
2760 */
2761 dev_kfree_skb(wlvif->probereq);
2762 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2763 wlvif,
2764 NULL);
2765 ieoffset = offsetof(struct ieee80211_mgmt,
2766 u.probe_req.variable);
2767 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2768
2769 /* enable the connection monitoring feature */
2770 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2771 if (ret < 0)
2772 return ret;
2773
2774 /*
2775 * The join command disables the keep-alive mode, shuts down its process,
2776 * and also clears the template config, so we need to reset it all after
2777 * the join. The acx_aid starts the keep-alive process, and the order
2778 * of the commands below is relevant.
2779 */
2780 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2781 if (ret < 0)
2782 return ret;
2783
2784 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2785 if (ret < 0)
2786 return ret;
2787
2788 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2789 if (ret < 0)
2790 return ret;
2791
2792 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2793 wlvif->sta.klv_template_id,
2794 ACX_KEEP_ALIVE_TPL_VALID);
2795 if (ret < 0)
2796 return ret;
2797
2798 /*
2799 * The default fw psm configuration is AUTO, while mac80211 default
2800 * setting is off (ACTIVE), so sync the fw with the correct value.
2801 */
2802 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2803 if (ret < 0)
2804 return ret;
2805
2806 if (sta_rate_set) {
2807 wlvif->rate_set =
2808 wl1271_tx_enabled_rates_get(wl,
2809 sta_rate_set,
2810 wlvif->band);
2811 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2812 if (ret < 0)
2813 return ret;
2814 }
2815
2816 return ret;
2817 }
2818
2819 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2820 {
2821 int ret;
2822 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
2823
2824 /* make sure we were associated (sta) */
2825 if (sta &&
2826 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2827 return false;
2828
2829 /* make sure we are joined (ibss) */
2830 if (!sta &&
2831 !test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
2832 return false;
2833
2834 if (sta) {
2835 /* use defaults when not associated */
2836 wlvif->aid = 0;
2837
2838 /* free probe-request template */
2839 dev_kfree_skb(wlvif->probereq);
2840 wlvif->probereq = NULL;
2841
2842 /* disable connection monitor features */
2843 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
2844 if (ret < 0)
2845 return ret;
2846
2847 /* Disable the keep-alive feature */
2848 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
2849 if (ret < 0)
2850 return ret;
2851 }
2852
2853 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
2854 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2855
2856 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2857 ieee80211_chswitch_done(vif, false);
2858 cancel_delayed_work(&wlvif->channel_switch_work);
2859 }
2860
2861 /* invalidate keep-alive template */
2862 wl1271_acx_keep_alive_config(wl, wlvif,
2863 wlvif->sta.klv_template_id,
2864 ACX_KEEP_ALIVE_TPL_INVALID);
2865
2866 /* reset TX security counters on a clean disconnect */
2867 wlvif->tx_security_last_seq_lsb = 0;
2868 wlvif->tx_security_seq = 0;
2869
2870 return 0;
2871 }
2872
2873 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2874 {
2875 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
2876 wlvif->rate_set = wlvif->basic_rate_set;
2877 }
2878
2879 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2880 struct ieee80211_conf *conf, u32 changed)
2881 {
2882 int ret;
2883
2884 if (conf->power_level != wlvif->power_level) {
2885 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
2886 if (ret < 0)
2887 return ret;
2888
2889 wlvif->power_level = conf->power_level;
2890 }
2891
2892 return 0;
2893 }
2894
2895 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
2896 {
2897 struct wl1271 *wl = hw->priv;
2898 struct wl12xx_vif *wlvif;
2899 struct ieee80211_conf *conf = &hw->conf;
2900 int ret = 0;
2901
2902 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
2903 " changed 0x%x",
2904 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
2905 conf->power_level,
2906 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
2907 changed);
2908
2909 mutex_lock(&wl->mutex);
2910
2911 if (changed & IEEE80211_CONF_CHANGE_POWER)
2912 wl->power_level = conf->power_level;
2913
2914 if (unlikely(wl->state != WLCORE_STATE_ON))
2915 goto out;
2916
2917 ret = wl1271_ps_elp_wakeup(wl);
2918 if (ret < 0)
2919 goto out;
2920
2921 /* configure each interface */
2922 wl12xx_for_each_wlvif(wl, wlvif) {
2923 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
2924 if (ret < 0)
2925 goto out_sleep;
2926 }
2927
2928 out_sleep:
2929 wl1271_ps_elp_sleep(wl);
2930
2931 out:
2932 mutex_unlock(&wl->mutex);
2933
2934 return ret;
2935 }
2936
2937 struct wl1271_filter_params {
2938 bool enabled;
2939 int mc_list_length;
2940 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
2941 };
2942
2943 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
2944 struct netdev_hw_addr_list *mc_list)
2945 {
2946 struct wl1271_filter_params *fp;
2947 struct netdev_hw_addr *ha;
2948
2949 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
2950 if (!fp) {
2951 wl1271_error("Out of memory setting filters.");
2952 return 0;
2953 }
2954
2955 /* update multicast filtering parameters */
2956 fp->mc_list_length = 0;
2957 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
2958 fp->enabled = false;
2959 } else {
2960 fp->enabled = true;
2961 netdev_hw_addr_list_for_each(ha, mc_list) {
2962 memcpy(fp->mc_list[fp->mc_list_length],
2963 ha->addr, ETH_ALEN);
2964 fp->mc_list_length++;
2965 }
2966 }
2967
2968 return (u64)(unsigned long)fp;
2969 }
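/*
 * The u64 returned above is the opaque "multicast" cookie that mac80211
 * hands back to configure_filter(): the driver simply packs the
 * kmalloc'ed wl1271_filter_params pointer into it, and
 * wl1271_op_configure_filter() below casts it back and kfree()s it on
 * every invocation.  If the list is longer than
 * ACX_MC_ADDRESS_GROUP_MAX, per-address filtering is disabled
 * (fp->enabled = false), which effectively lets all multicast frames
 * through.
 */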
2970
2971 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
2972 FIF_ALLMULTI | \
2973 FIF_FCSFAIL | \
2974 FIF_BCN_PRBRESP_PROMISC | \
2975 FIF_CONTROL | \
2976 FIF_OTHER_BSS)
2977
2978 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
2979 unsigned int changed,
2980 unsigned int *total, u64 multicast)
2981 {
2982 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
2983 struct wl1271 *wl = hw->priv;
2984 struct wl12xx_vif *wlvif;
2985
2986 int ret;
2987
2988 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
2989 " total %x", changed, *total);
2990
2991 mutex_lock(&wl->mutex);
2992
2993 *total &= WL1271_SUPPORTED_FILTERS;
2994 changed &= WL1271_SUPPORTED_FILTERS;
2995
2996 if (unlikely(wl->state != WLCORE_STATE_ON))
2997 goto out;
2998
2999 ret = wl1271_ps_elp_wakeup(wl);
3000 if (ret < 0)
3001 goto out;
3002
3003 wl12xx_for_each_wlvif(wl, wlvif) {
3004 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3005 if (*total & FIF_ALLMULTI)
3006 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3007 false,
3008 NULL, 0);
3009 else if (fp)
3010 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3011 fp->enabled,
3012 fp->mc_list,
3013 fp->mc_list_length);
3014 if (ret < 0)
3015 goto out_sleep;
3016 }
3017 }
3018
3019 /*
3020 * the fw doesn't provide an api to configure the filters. instead,
3021 * the filters configuration is based on the active roles / ROC
3022 * state.
3023 */
3024
3025 out_sleep:
3026 wl1271_ps_elp_sleep(wl);
3027
3028 out:
3029 mutex_unlock(&wl->mutex);
3030 kfree(fp);
3031 }
3032
3033 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3034 u8 id, u8 key_type, u8 key_size,
3035 const u8 *key, u8 hlid, u32 tx_seq_32,
3036 u16 tx_seq_16)
3037 {
3038 struct wl1271_ap_key *ap_key;
3039 int i;
3040
3041 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3042
3043 if (key_size > MAX_KEY_SIZE)
3044 return -EINVAL;
3045
3046 /*
3047 * Find next free entry in ap_keys. Also check we are not replacing
3048 * an existing key.
3049 */
3050 for (i = 0; i < MAX_NUM_KEYS; i++) {
3051 if (wlvif->ap.recorded_keys[i] == NULL)
3052 break;
3053
3054 if (wlvif->ap.recorded_keys[i]->id == id) {
3055 wl1271_warning("trying to record key replacement");
3056 return -EINVAL;
3057 }
3058 }
3059
3060 if (i == MAX_NUM_KEYS)
3061 return -EBUSY;
3062
3063 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3064 if (!ap_key)
3065 return -ENOMEM;
3066
3067 ap_key->id = id;
3068 ap_key->key_type = key_type;
3069 ap_key->key_size = key_size;
3070 memcpy(ap_key->key, key, key_size);
3071 ap_key->hlid = hlid;
3072 ap_key->tx_seq_32 = tx_seq_32;
3073 ap_key->tx_seq_16 = tx_seq_16;
3074
3075 wlvif->ap.recorded_keys[i] = ap_key;
3076 return 0;
3077 }
3078
3079 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3080 {
3081 int i;
3082
3083 for (i = 0; i < MAX_NUM_KEYS; i++) {
3084 kfree(wlvif->ap.recorded_keys[i]);
3085 wlvif->ap.recorded_keys[i] = NULL;
3086 }
3087 }
3088
3089 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3090 {
3091 int i, ret = 0;
3092 struct wl1271_ap_key *key;
3093 bool wep_key_added = false;
3094
3095 for (i = 0; i < MAX_NUM_KEYS; i++) {
3096 u8 hlid;
3097 if (wlvif->ap.recorded_keys[i] == NULL)
3098 break;
3099
3100 key = wlvif->ap.recorded_keys[i];
3101 hlid = key->hlid;
3102 if (hlid == WL12XX_INVALID_LINK_ID)
3103 hlid = wlvif->ap.bcast_hlid;
3104
3105 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3106 key->id, key->key_type,
3107 key->key_size, key->key,
3108 hlid, key->tx_seq_32,
3109 key->tx_seq_16);
3110 if (ret < 0)
3111 goto out;
3112
3113 if (key->key_type == KEY_WEP)
3114 wep_key_added = true;
3115 }
3116
3117 if (wep_key_added) {
3118 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3119 wlvif->ap.bcast_hlid);
3120 if (ret < 0)
3121 goto out;
3122 }
3123
3124 out:
3125 wl1271_free_ap_keys(wl, wlvif);
3126 return ret;
3127 }
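/*
 * Together, wl1271_record_ap_key() and wl1271_ap_init_hwenc() handle
 * keys that mac80211 installs before the AP role has been started: the
 * keys are parked in wlvif->ap.recorded_keys[] and replayed to the
 * firmware once wl12xx_cmd_role_start_ap() has run (see the
 * BSS_CHANGED_BEACON_ENABLED handling further down).  If any of the
 * recorded keys was WEP, the default WEP key is programmed as well, and
 * the recorded list is freed in both the success and error paths.
 */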
3128
3129 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3130 u16 action, u8 id, u8 key_type,
3131 u8 key_size, const u8 *key, u32 tx_seq_32,
3132 u16 tx_seq_16, struct ieee80211_sta *sta)
3133 {
3134 int ret;
3135 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3136
3137 if (is_ap) {
3138 struct wl1271_station *wl_sta;
3139 u8 hlid;
3140
3141 if (sta) {
3142 wl_sta = (struct wl1271_station *)sta->drv_priv;
3143 hlid = wl_sta->hlid;
3144 } else {
3145 hlid = wlvif->ap.bcast_hlid;
3146 }
3147
3148 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3149 /*
3150 * We do not support removing keys after AP shutdown.
3151 * Pretend we do to make mac80211 happy.
3152 */
3153 if (action != KEY_ADD_OR_REPLACE)
3154 return 0;
3155
3156 ret = wl1271_record_ap_key(wl, wlvif, id,
3157 key_type, key_size,
3158 key, hlid, tx_seq_32,
3159 tx_seq_16);
3160 } else {
3161 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3162 id, key_type, key_size,
3163 key, hlid, tx_seq_32,
3164 tx_seq_16);
3165 }
3166
3167 if (ret < 0)
3168 return ret;
3169 } else {
3170 const u8 *addr;
3171 static const u8 bcast_addr[ETH_ALEN] = {
3172 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3173 };
3174
3175 addr = sta ? sta->addr : bcast_addr;
3176
3177 if (is_zero_ether_addr(addr)) {
3178 /* We don't support TX-only encryption */
3179 return -EOPNOTSUPP;
3180 }
3181
3182 /* The wl1271 does not allow removing unicast keys - they
3183 will be cleared automatically on the next CMD_JOIN. Ignore the
3184 request silently, as we don't want mac80211 to emit
3185 an error message. */
3186 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3187 return 0;
3188
3189 /* don't remove key if hlid was already deleted */
3190 if (action == KEY_REMOVE &&
3191 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3192 return 0;
3193
3194 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3195 id, key_type, key_size,
3196 key, addr, tx_seq_32,
3197 tx_seq_16);
3198 if (ret < 0)
3199 return ret;
3200
3201 /* the default WEP key needs to be configured at least once */
3202 if (key_type == KEY_WEP) {
3203 ret = wl12xx_cmd_set_default_wep_key(wl,
3204 wlvif->default_key,
3205 wlvif->sta.hlid);
3206 if (ret < 0)
3207 return ret;
3208 }
3209 }
3210
3211 return 0;
3212 }
3213
3214 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3215 struct ieee80211_vif *vif,
3216 struct ieee80211_sta *sta,
3217 struct ieee80211_key_conf *key_conf)
3218 {
3219 struct wl1271 *wl = hw->priv;
3220 int ret;
3221 bool might_change_spare =
3222 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3223 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3224
3225 if (might_change_spare) {
3226 /*
3227 * stop the queues and flush to ensure the next packets are
3228 * in sync with FW spare block accounting
3229 */
3230 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3231 wl1271_tx_flush(wl);
3232 }
3233
3234 mutex_lock(&wl->mutex);
3235
3236 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3237 ret = -EAGAIN;
3238 goto out_wake_queues;
3239 }
3240
3241 ret = wl1271_ps_elp_wakeup(wl);
3242 if (ret < 0)
3243 goto out_wake_queues;
3244
3245 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3246
3247 wl1271_ps_elp_sleep(wl);
3248
3249 out_wake_queues:
3250 if (might_change_spare)
3251 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3252
3253 mutex_unlock(&wl->mutex);
3254
3255 return ret;
3256 }
3257
3258 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3259 struct ieee80211_vif *vif,
3260 struct ieee80211_sta *sta,
3261 struct ieee80211_key_conf *key_conf)
3262 {
3263 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3264 int ret;
3265 u32 tx_seq_32 = 0;
3266 u16 tx_seq_16 = 0;
3267 u8 key_type;
3268
3269 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3270
3271 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3272 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3273 key_conf->cipher, key_conf->keyidx,
3274 key_conf->keylen, key_conf->flags);
3275 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3276
3277 switch (key_conf->cipher) {
3278 case WLAN_CIPHER_SUITE_WEP40:
3279 case WLAN_CIPHER_SUITE_WEP104:
3280 key_type = KEY_WEP;
3281
3282 key_conf->hw_key_idx = key_conf->keyidx;
3283 break;
3284 case WLAN_CIPHER_SUITE_TKIP:
3285 key_type = KEY_TKIP;
3286
3287 key_conf->hw_key_idx = key_conf->keyidx;
3288 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3289 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3290 break;
3291 case WLAN_CIPHER_SUITE_CCMP:
3292 key_type = KEY_AES;
3293
3294 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3295 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3296 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3297 break;
3298 case WL1271_CIPHER_SUITE_GEM:
3299 key_type = KEY_GEM;
3300 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3301 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3302 break;
3303 default:
3304 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3305
3306 return -EOPNOTSUPP;
3307 }
3308
3309 switch (cmd) {
3310 case SET_KEY:
3311 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3312 key_conf->keyidx, key_type,
3313 key_conf->keylen, key_conf->key,
3314 tx_seq_32, tx_seq_16, sta);
3315 if (ret < 0) {
3316 wl1271_error("Could not add or replace key");
3317 return ret;
3318 }
3319
3320 /*
3321 * reconfigure the arp response if the unicast (or common)
3322 * encryption key type was changed
3323 */
3324 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3325 (sta || key_type == KEY_WEP) &&
3326 wlvif->encryption_type != key_type) {
3327 wlvif->encryption_type = key_type;
3328 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3329 if (ret < 0) {
3330 wl1271_warning("build arp rsp failed: %d", ret);
3331 return ret;
3332 }
3333 }
3334 break;
3335
3336 case DISABLE_KEY:
3337 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3338 key_conf->keyidx, key_type,
3339 key_conf->keylen, key_conf->key,
3340 0, 0, sta);
3341 if (ret < 0) {
3342 wl1271_error("Could not remove key");
3343 return ret;
3344 }
3345 break;
3346
3347 default:
3348 wl1271_error("Unsupported key cmd 0x%x", cmd);
3349 return -EOPNOTSUPP;
3350 }
3351
3352 return ret;
3353 }
3354 EXPORT_SYMBOL_GPL(wlcore_set_key);
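/*
 * Note on the tx_seq_32/tx_seq_16 values passed down for TKIP, CCMP and
 * GEM above: wlvif->tx_security_seq holds the driver-side 48-bit packet
 * number, and the WL1271_TX_SECURITY_HI32()/LO16() helpers (defined in
 * wlcore_i.h, roughly ((s) >> 16) and ((s) & 0xffff)) split it into the
 * two fields the firmware key commands expect, so newly programmed keys
 * continue the current sequence instead of restarting from zero.
 */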
3355
3356 void wlcore_regdomain_config(struct wl1271 *wl)
3357 {
3358 int ret;
3359
3360 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3361 return;
3362
3363 mutex_lock(&wl->mutex);
3364 ret = wl1271_ps_elp_wakeup(wl);
3365 if (ret < 0)
3366 goto out;
3367
3368 ret = wlcore_cmd_regdomain_config_locked(wl);
3369 if (ret < 0) {
3370 wl12xx_queue_recovery_work(wl);
3371 goto out;
3372 }
3373
3374 wl1271_ps_elp_sleep(wl);
3375 out:
3376 mutex_unlock(&wl->mutex);
3377 }
3378
3379 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3380 struct ieee80211_vif *vif,
3381 struct cfg80211_scan_request *req)
3382 {
3383 struct wl1271 *wl = hw->priv;
3384 int ret;
3385 u8 *ssid = NULL;
3386 size_t len = 0;
3387
3388 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3389
3390 if (req->n_ssids) {
3391 ssid = req->ssids[0].ssid;
3392 len = req->ssids[0].ssid_len;
3393 }
3394
3395 mutex_lock(&wl->mutex);
3396
3397 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3398 /*
3399 * We cannot return -EBUSY here because cfg80211 will expect
3400 * a call to ieee80211_scan_completed if we do - in this case
3401 * there won't be any call.
3402 */
3403 ret = -EAGAIN;
3404 goto out;
3405 }
3406
3407 ret = wl1271_ps_elp_wakeup(wl);
3408 if (ret < 0)
3409 goto out;
3410
3411 /* fail if there is any role in ROC */
3412 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3413 /* don't allow scanning right now */
3414 ret = -EBUSY;
3415 goto out_sleep;
3416 }
3417
3418 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3419 out_sleep:
3420 wl1271_ps_elp_sleep(wl);
3421 out:
3422 mutex_unlock(&wl->mutex);
3423
3424 return ret;
3425 }
3426
3427 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3428 struct ieee80211_vif *vif)
3429 {
3430 struct wl1271 *wl = hw->priv;
3431 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3432 int ret;
3433
3434 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3435
3436 mutex_lock(&wl->mutex);
3437
3438 if (unlikely(wl->state != WLCORE_STATE_ON))
3439 goto out;
3440
3441 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3442 goto out;
3443
3444 ret = wl1271_ps_elp_wakeup(wl);
3445 if (ret < 0)
3446 goto out;
3447
3448 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3449 ret = wl->ops->scan_stop(wl, wlvif);
3450 if (ret < 0)
3451 goto out_sleep;
3452 }
3453
3454 /*
3455 * Rearm the tx watchdog just before idling scan. This
3456 * prevents just-finished scans from triggering the watchdog
3457 */
3458 wl12xx_rearm_tx_watchdog_locked(wl);
3459
3460 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3461 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3462 wl->scan_wlvif = NULL;
3463 wl->scan.req = NULL;
3464 ieee80211_scan_completed(wl->hw, true);
3465
3466 out_sleep:
3467 wl1271_ps_elp_sleep(wl);
3468 out:
3469 mutex_unlock(&wl->mutex);
3470
3471 cancel_delayed_work_sync(&wl->scan_complete_work);
3472 }
3473
3474 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3475 struct ieee80211_vif *vif,
3476 struct cfg80211_sched_scan_request *req,
3477 struct ieee80211_sched_scan_ies *ies)
3478 {
3479 struct wl1271 *wl = hw->priv;
3480 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3481 int ret;
3482
3483 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3484
3485 mutex_lock(&wl->mutex);
3486
3487 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3488 ret = -EAGAIN;
3489 goto out;
3490 }
3491
3492 ret = wl1271_ps_elp_wakeup(wl);
3493 if (ret < 0)
3494 goto out;
3495
3496 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3497 if (ret < 0)
3498 goto out_sleep;
3499
3500 wl->sched_vif = wlvif;
3501
3502 out_sleep:
3503 wl1271_ps_elp_sleep(wl);
3504 out:
3505 mutex_unlock(&wl->mutex);
3506 return ret;
3507 }
3508
3509 static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3510 struct ieee80211_vif *vif)
3511 {
3512 struct wl1271 *wl = hw->priv;
3513 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3514 int ret;
3515
3516 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3517
3518 mutex_lock(&wl->mutex);
3519
3520 if (unlikely(wl->state != WLCORE_STATE_ON))
3521 goto out;
3522
3523 ret = wl1271_ps_elp_wakeup(wl);
3524 if (ret < 0)
3525 goto out;
3526
3527 wl->ops->sched_scan_stop(wl, wlvif);
3528
3529 wl1271_ps_elp_sleep(wl);
3530 out:
3531 mutex_unlock(&wl->mutex);
3532 }
3533
3534 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3535 {
3536 struct wl1271 *wl = hw->priv;
3537 int ret = 0;
3538
3539 mutex_lock(&wl->mutex);
3540
3541 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3542 ret = -EAGAIN;
3543 goto out;
3544 }
3545
3546 ret = wl1271_ps_elp_wakeup(wl);
3547 if (ret < 0)
3548 goto out;
3549
3550 ret = wl1271_acx_frag_threshold(wl, value);
3551 if (ret < 0)
3552 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3553
3554 wl1271_ps_elp_sleep(wl);
3555
3556 out:
3557 mutex_unlock(&wl->mutex);
3558
3559 return ret;
3560 }
3561
3562 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3563 {
3564 struct wl1271 *wl = hw->priv;
3565 struct wl12xx_vif *wlvif;
3566 int ret = 0;
3567
3568 mutex_lock(&wl->mutex);
3569
3570 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3571 ret = -EAGAIN;
3572 goto out;
3573 }
3574
3575 ret = wl1271_ps_elp_wakeup(wl);
3576 if (ret < 0)
3577 goto out;
3578
3579 wl12xx_for_each_wlvif(wl, wlvif) {
3580 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3581 if (ret < 0)
3582 wl1271_warning("set rts threshold failed: %d", ret);
3583 }
3584 wl1271_ps_elp_sleep(wl);
3585
3586 out:
3587 mutex_unlock(&wl->mutex);
3588
3589 return ret;
3590 }
3591
3592 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3593 {
3594 int len;
3595 const u8 *next, *end = skb->data + skb->len;
3596 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3597 skb->len - ieoffset);
3598 if (!ie)
3599 return;
3600 len = ie[1] + 2;
3601 next = ie + len;
3602 memmove(ie, next, end - next);
3603 skb_trim(skb, skb->len - len);
3604 }
3605
3606 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3607 unsigned int oui, u8 oui_type,
3608 int ieoffset)
3609 {
3610 int len;
3611 const u8 *next, *end = skb->data + skb->len;
3612 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3613 skb->data + ieoffset,
3614 skb->len - ieoffset);
3615 if (!ie)
3616 return;
3617 len = ie[1] + 2;
3618 next = ie + len;
3619 memmove(ie, next, end - next);
3620 skb_trim(skb, skb->len - len);
3621 }
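/*
 * Both IE-removal helpers above work the same way: cfg80211_find_*ie()
 * locates the element, len = ie[1] + 2 covers the two-byte header plus
 * the payload, memmove() slides the rest of the frame left over the
 * element, and skb_trim() drops the now-duplicated tail bytes.  E.g.
 * removing a TIM element with a 6-byte body shrinks the skb by 8 bytes.
 */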
3622
3623 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3624 struct ieee80211_vif *vif)
3625 {
3626 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3627 struct sk_buff *skb;
3628 int ret;
3629
3630 skb = ieee80211_proberesp_get(wl->hw, vif);
3631 if (!skb)
3632 return -EOPNOTSUPP;
3633
3634 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3635 CMD_TEMPL_AP_PROBE_RESPONSE,
3636 skb->data,
3637 skb->len, 0,
3638 rates);
3639 dev_kfree_skb(skb);
3640
3641 if (ret < 0)
3642 goto out;
3643
3644 wl1271_debug(DEBUG_AP, "probe response updated");
3645 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3646
3647 out:
3648 return ret;
3649 }
3650
3651 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3652 struct ieee80211_vif *vif,
3653 u8 *probe_rsp_data,
3654 size_t probe_rsp_len,
3655 u32 rates)
3656 {
3657 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3658 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3659 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3660 int ssid_ie_offset, ie_offset, templ_len;
3661 const u8 *ptr;
3662
3663 /* no need to change probe response if the SSID is set correctly */
3664 if (wlvif->ssid_len > 0)
3665 return wl1271_cmd_template_set(wl, wlvif->role_id,
3666 CMD_TEMPL_AP_PROBE_RESPONSE,
3667 probe_rsp_data,
3668 probe_rsp_len, 0,
3669 rates);
3670
3671 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3672 wl1271_error("probe_rsp template too big");
3673 return -EINVAL;
3674 }
3675
3676 /* start searching from IE offset */
3677 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3678
3679 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3680 probe_rsp_len - ie_offset);
3681 if (!ptr) {
3682 wl1271_error("No SSID in beacon!");
3683 return -EINVAL;
3684 }
3685
3686 ssid_ie_offset = ptr - probe_rsp_data;
3687 ptr += (ptr[1] + 2);
3688
3689 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3690
3691 /* insert SSID from bss_conf */
3692 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3693 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3694 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3695 bss_conf->ssid, bss_conf->ssid_len);
3696 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3697
3698 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3699 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3700 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3701
3702 return wl1271_cmd_template_set(wl, wlvif->role_id,
3703 CMD_TEMPL_AP_PROBE_RESPONSE,
3704 probe_rsp_templ,
3705 templ_len, 0,
3706 rates);
3707 }
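/*
 * The legacy path above is only needed when the supplied probe-response
 * data has no usable SSID of its own (wlvif->ssid_len == 0, e.g. a
 * hidden-SSID setup): it rebuilds the template by copying everything up
 * to the SSID element, inserting a fresh [WLAN_EID_SSID][len][ssid]
 * taken from bss_conf, and then appending whatever followed the
 * original element.
 */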
3708
3709 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3710 struct ieee80211_vif *vif,
3711 struct ieee80211_bss_conf *bss_conf,
3712 u32 changed)
3713 {
3714 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3715 int ret = 0;
3716
3717 if (changed & BSS_CHANGED_ERP_SLOT) {
3718 if (bss_conf->use_short_slot)
3719 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3720 else
3721 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3722 if (ret < 0) {
3723 wl1271_warning("Set slot time failed %d", ret);
3724 goto out;
3725 }
3726 }
3727
3728 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3729 if (bss_conf->use_short_preamble)
3730 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3731 else
3732 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3733 }
3734
3735 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3736 if (bss_conf->use_cts_prot)
3737 ret = wl1271_acx_cts_protect(wl, wlvif,
3738 CTSPROTECT_ENABLE);
3739 else
3740 ret = wl1271_acx_cts_protect(wl, wlvif,
3741 CTSPROTECT_DISABLE);
3742 if (ret < 0) {
3743 wl1271_warning("Set ctsprotect failed %d", ret);
3744 goto out;
3745 }
3746 }
3747
3748 out:
3749 return ret;
3750 }
3751
3752 static int wlcore_set_beacon_template(struct wl1271 *wl,
3753 struct ieee80211_vif *vif,
3754 bool is_ap)
3755 {
3756 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3757 struct ieee80211_hdr *hdr;
3758 u32 min_rate;
3759 int ret;
3760 int ieoffset = offsetof(struct ieee80211_mgmt,
3761 u.beacon.variable);
3762 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3763 u16 tmpl_id;
3764
3765 if (!beacon) {
3766 ret = -EINVAL;
3767 goto out;
3768 }
3769
3770 wl1271_debug(DEBUG_MASTER, "beacon updated");
3771
3772 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
3773 if (ret < 0) {
3774 dev_kfree_skb(beacon);
3775 goto out;
3776 }
3777 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3778 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3779 CMD_TEMPL_BEACON;
3780 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3781 beacon->data,
3782 beacon->len, 0,
3783 min_rate);
3784 if (ret < 0) {
3785 dev_kfree_skb(beacon);
3786 goto out;
3787 }
3788
3789 wlvif->wmm_enabled =
3790 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
3791 WLAN_OUI_TYPE_MICROSOFT_WMM,
3792 beacon->data + ieoffset,
3793 beacon->len - ieoffset);
3794
3795 /*
3796 * In case we already have a probe-resp beacon set explicitly
3797 * by usermode, don't use the beacon data.
3798 */
3799 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
3800 goto end_bcn;
3801
3802 /* remove TIM ie from probe response */
3803 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
3804
3805 /*
3806 * remove p2p ie from probe response.
3807 * the fw responds to probe requests that don't include
3808 * the p2p ie. probe requests with the p2p ie will be passed,
3809 * and will be answered by the supplicant (the spec
3810 * forbids including the p2p ie when responding to probe
3811 * requests that didn't include it).
3812 */
3813 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
3814 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
3815
3816 hdr = (struct ieee80211_hdr *) beacon->data;
3817 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3818 IEEE80211_STYPE_PROBE_RESP);
3819 if (is_ap)
3820 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
3821 beacon->data,
3822 beacon->len,
3823 min_rate);
3824 else
3825 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3826 CMD_TEMPL_PROBE_RESPONSE,
3827 beacon->data,
3828 beacon->len, 0,
3829 min_rate);
3830 end_bcn:
3831 dev_kfree_skb(beacon);
3832 if (ret < 0)
3833 goto out;
3834
3835 out:
3836 return ret;
3837 }
3838
3839 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
3840 struct ieee80211_vif *vif,
3841 struct ieee80211_bss_conf *bss_conf,
3842 u32 changed)
3843 {
3844 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3845 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3846 int ret = 0;
3847
3848 if (changed & BSS_CHANGED_BEACON_INT) {
3849 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
3850 bss_conf->beacon_int);
3851
3852 wlvif->beacon_int = bss_conf->beacon_int;
3853 }
3854
3855 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
3856 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3857
3858 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
3859 }
3860
3861 if (changed & BSS_CHANGED_BEACON) {
3862 ret = wlcore_set_beacon_template(wl, vif, is_ap);
3863 if (ret < 0)
3864 goto out;
3865 }
3866
3867 out:
3868 if (ret != 0)
3869 wl1271_error("beacon info change failed: %d", ret);
3870 return ret;
3871 }
3872
3873 /* AP mode changes */
3874 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
3875 struct ieee80211_vif *vif,
3876 struct ieee80211_bss_conf *bss_conf,
3877 u32 changed)
3878 {
3879 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3880 int ret = 0;
3881
3882 if (changed & BSS_CHANGED_BASIC_RATES) {
3883 u32 rates = bss_conf->basic_rates;
3884
3885 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
3886 wlvif->band);
3887 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
3888 wlvif->basic_rate_set);
3889
3890 ret = wl1271_init_ap_rates(wl, wlvif);
3891 if (ret < 0) {
3892 wl1271_error("AP rate policy change failed %d", ret);
3893 goto out;
3894 }
3895
3896 ret = wl1271_ap_init_templates(wl, vif);
3897 if (ret < 0)
3898 goto out;
3899
3900 ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
3901 if (ret < 0)
3902 goto out;
3903
3904 ret = wlcore_set_beacon_template(wl, vif, true);
3905 if (ret < 0)
3906 goto out;
3907 }
3908
3909 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
3910 if (ret < 0)
3911 goto out;
3912
3913 if (changed & BSS_CHANGED_BEACON_ENABLED) {
3914 if (bss_conf->enable_beacon) {
3915 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3916 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
3917 if (ret < 0)
3918 goto out;
3919
3920 ret = wl1271_ap_init_hwenc(wl, wlvif);
3921 if (ret < 0)
3922 goto out;
3923
3924 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3925 wl1271_debug(DEBUG_AP, "started AP");
3926 }
3927 } else {
3928 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3929 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
3930 if (ret < 0)
3931 goto out;
3932
3933 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3934 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
3935 &wlvif->flags);
3936 wl1271_debug(DEBUG_AP, "stopped AP");
3937 }
3938 }
3939 }
3940
3941 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
3942 if (ret < 0)
3943 goto out;
3944
3945 /* Handle HT information change */
3946 if ((changed & BSS_CHANGED_HT) &&
3947 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
3948 ret = wl1271_acx_set_ht_information(wl, wlvif,
3949 bss_conf->ht_operation_mode);
3950 if (ret < 0) {
3951 wl1271_warning("Set ht information failed %d", ret);
3952 goto out;
3953 }
3954 }
3955
3956 out:
3957 return;
3958 }
3959
3960 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3961 struct ieee80211_bss_conf *bss_conf,
3962 u32 sta_rate_set)
3963 {
3964 u32 rates;
3965 int ret;
3966
3967 wl1271_debug(DEBUG_MAC80211,
3968 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
3969 bss_conf->bssid, bss_conf->aid,
3970 bss_conf->beacon_int,
3971 bss_conf->basic_rates, sta_rate_set);
3972
3973 wlvif->beacon_int = bss_conf->beacon_int;
3974 rates = bss_conf->basic_rates;
3975 wlvif->basic_rate_set =
3976 wl1271_tx_enabled_rates_get(wl, rates,
3977 wlvif->band);
3978 wlvif->basic_rate =
3979 wl1271_tx_min_rate_get(wl,
3980 wlvif->basic_rate_set);
3981
3982 if (sta_rate_set)
3983 wlvif->rate_set =
3984 wl1271_tx_enabled_rates_get(wl,
3985 sta_rate_set,
3986 wlvif->band);
3987
3988 /* we only support sched_scan while not connected */
3989 if (wl->sched_vif == wlvif)
3990 wl->ops->sched_scan_stop(wl, wlvif);
3991
3992 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3993 if (ret < 0)
3994 return ret;
3995
3996 ret = wl12xx_cmd_build_null_data(wl, wlvif);
3997 if (ret < 0)
3998 return ret;
3999
4000 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4001 if (ret < 0)
4002 return ret;
4003
4004 wlcore_set_ssid(wl, wlvif);
4005
4006 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4007
4008 return 0;
4009 }
4010
4011 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4012 {
4013 int ret;
4014
4015 /* revert back to minimum rates for the current band */
4016 wl1271_set_band_rate(wl, wlvif);
4017 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4018
4019 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4020 if (ret < 0)
4021 return ret;
4022
4023 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4024 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4025 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4026 if (ret < 0)
4027 return ret;
4028 }
4029
4030 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4031 return 0;
4032 }
4033 /* STA/IBSS mode changes */
4034 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4035 struct ieee80211_vif *vif,
4036 struct ieee80211_bss_conf *bss_conf,
4037 u32 changed)
4038 {
4039 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4040 bool do_join = false;
4041 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4042 bool ibss_joined = false;
4043 u32 sta_rate_set = 0;
4044 int ret;
4045 struct ieee80211_sta *sta;
4046 bool sta_exists = false;
4047 struct ieee80211_sta_ht_cap sta_ht_cap;
4048
4049 if (is_ibss) {
4050 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4051 changed);
4052 if (ret < 0)
4053 goto out;
4054 }
4055
4056 if (changed & BSS_CHANGED_IBSS) {
4057 if (bss_conf->ibss_joined) {
4058 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4059 ibss_joined = true;
4060 } else {
4061 wlcore_unset_assoc(wl, wlvif);
4062 wl12xx_cmd_role_stop_sta(wl, wlvif);
4063 }
4064 }
4065
4066 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4067 do_join = true;
4068
4069 /* Need to update the SSID (for filtering etc) */
4070 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4071 do_join = true;
4072
4073 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4074 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4075 bss_conf->enable_beacon ? "enabled" : "disabled");
4076
4077 do_join = true;
4078 }
4079
4080 if (changed & BSS_CHANGED_CQM) {
4081 bool enable = false;
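/* a zero cqm_rssi_thold from mac80211 means CQM reporting is being disabled */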
4082 if (bss_conf->cqm_rssi_thold)
4083 enable = true;
4084 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4085 bss_conf->cqm_rssi_thold,
4086 bss_conf->cqm_rssi_hyst);
4087 if (ret < 0)
4088 goto out;
4089 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4090 }
4091
4092 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4093 BSS_CHANGED_ASSOC)) {
4094 rcu_read_lock();
4095 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4096 if (sta) {
4097 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4098
4099 /* save the supp_rates of the ap */
4100 sta_rate_set = sta->supp_rates[wlvif->band];
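/* fold the AP's HT MCS rx rates into the rate bitmap, above the legacy rates */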
4101 if (sta->ht_cap.ht_supported)
4102 sta_rate_set |=
4103 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4104 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
4105 sta_ht_cap = sta->ht_cap;
4106 sta_exists = true;
4107 }
4108
4109 rcu_read_unlock();
4110 }
4111
4112 if (changed & BSS_CHANGED_BSSID) {
4113 if (!is_zero_ether_addr(bss_conf->bssid)) {
4114 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4115 sta_rate_set);
4116 if (ret < 0)
4117 goto out;
4118
4119 /* Need to update the BSSID (for filtering etc) */
4120 do_join = true;
4121 } else {
4122 ret = wlcore_clear_bssid(wl, wlvif);
4123 if (ret < 0)
4124 goto out;
4125 }
4126 }
4127
4128 if (changed & BSS_CHANGED_IBSS) {
4129 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4130 bss_conf->ibss_joined);
4131
4132 if (bss_conf->ibss_joined) {
4133 u32 rates = bss_conf->basic_rates;
4134 wlvif->basic_rate_set =
4135 wl1271_tx_enabled_rates_get(wl, rates,
4136 wlvif->band);
4137 wlvif->basic_rate =
4138 wl1271_tx_min_rate_get(wl,
4139 wlvif->basic_rate_set);
4140
4141 /* by default, use 11b + OFDM rates */
4142 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4143 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4144 if (ret < 0)
4145 goto out;
4146 }
4147 }
4148
4149 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4150 if (ret < 0)
4151 goto out;
4152
4153 if (do_join) {
4154 ret = wlcore_join(wl, wlvif);
4155 if (ret < 0) {
4156 wl1271_warning("cmd join failed %d", ret);
4157 goto out;
4158 }
4159 }
4160
4161 if (changed & BSS_CHANGED_ASSOC) {
4162 if (bss_conf->assoc) {
4163 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4164 sta_rate_set);
4165 if (ret < 0)
4166 goto out;
4167
4168 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4169 wl12xx_set_authorized(wl, wlvif);
4170 } else {
4171 wlcore_unset_assoc(wl, wlvif);
4172 }
4173 }
4174
4175 if (changed & BSS_CHANGED_PS) {
4176 if ((bss_conf->ps) &&
4177 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4178 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4179 int ps_mode;
4180 char *ps_mode_str;
4181
4182 if (wl->conf.conn.forced_ps) {
4183 ps_mode = STATION_POWER_SAVE_MODE;
4184 ps_mode_str = "forced";
4185 } else {
4186 ps_mode = STATION_AUTO_PS_MODE;
4187 ps_mode_str = "auto";
4188 }
4189
4190 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4191
4192 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4193 if (ret < 0)
4194 wl1271_warning("enter %s ps failed %d",
4195 ps_mode_str, ret);
4196 } else if (!bss_conf->ps &&
4197 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4198 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4199
4200 ret = wl1271_ps_set_mode(wl, wlvif,
4201 STATION_ACTIVE_MODE);
4202 if (ret < 0)
4203 wl1271_warning("exit auto ps failed %d", ret);
4204 }
4205 }
4206
4207 /* Handle new association with HT. Do this after join. */
4208 if (sta_exists &&
4209 (changed & BSS_CHANGED_HT)) {
4210 bool enabled =
4211 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4212
4213 ret = wlcore_hw_set_peer_cap(wl,
4214 &sta_ht_cap,
4215 enabled,
4216 wlvif->rate_set,
4217 wlvif->sta.hlid);
4218 if (ret < 0) {
4219 wl1271_warning("Set ht cap failed %d", ret);
4220 goto out;
4221
4222 }
4223
4224 if (enabled) {
4225 ret = wl1271_acx_set_ht_information(wl, wlvif,
4226 bss_conf->ht_operation_mode);
4227 if (ret < 0) {
4228 wl1271_warning("Set ht information failed %d",
4229 ret);
4230 goto out;
4231 }
4232 }
4233 }
4234
4235 /* Handle arp filtering. Done after join. */
4236 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4237 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4238 __be32 addr = bss_conf->arp_addr_list[0];
4239 wlvif->sta.qos = bss_conf->qos;
4240 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4241
4242 if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4243 wlvif->ip_addr = addr;
4244 /*
4245 * The template should have been configured only upon
4246 * association. However, it seems that the correct IP
4247 * isn't being set (when sending), so we have to
4248 * reconfigure the template upon every IP change.
4249 */
4250 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4251 if (ret < 0) {
4252 wl1271_warning("build arp rsp failed: %d", ret);
4253 goto out;
4254 }
4255
4256 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4257 (ACX_ARP_FILTER_ARP_FILTERING |
4258 ACX_ARP_FILTER_AUTO_ARP),
4259 addr);
4260 } else {
4261 wlvif->ip_addr = 0;
4262 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4263 }
4264
4265 if (ret < 0)
4266 goto out;
4267 }
4268
4269 out:
4270 return;
4271 }
4272
4273 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4274 struct ieee80211_vif *vif,
4275 struct ieee80211_bss_conf *bss_conf,
4276 u32 changed)
4277 {
4278 struct wl1271 *wl = hw->priv;
4279 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4280 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4281 int ret;
4282
4283 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4284 wlvif->role_id, (int)changed);
4285
4286 /*
4287 * make sure to cancel pending disconnections if our association
4288 * state changed
4289 */
4290 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4291 cancel_delayed_work_sync(&wlvif->connection_loss_work);
4292
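/* flush pending frames before the AP stops beaconing */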
4293 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4294 !bss_conf->enable_beacon)
4295 wl1271_tx_flush(wl);
4296
4297 mutex_lock(&wl->mutex);
4298
4299 if (unlikely(wl->state != WLCORE_STATE_ON))
4300 goto out;
4301
4302 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4303 goto out;
4304
4305 ret = wl1271_ps_elp_wakeup(wl);
4306 if (ret < 0)
4307 goto out;
4308
4309 if (is_ap)
4310 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4311 else
4312 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4313
4314 wl1271_ps_elp_sleep(wl);
4315
4316 out:
4317 mutex_unlock(&wl->mutex);
4318 }
4319
4320 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4321 struct ieee80211_chanctx_conf *ctx)
4322 {
4323 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4324 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4325 cfg80211_get_chandef_type(&ctx->def));
4326 return 0;
4327 }
4328
4329 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4330 struct ieee80211_chanctx_conf *ctx)
4331 {
4332 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4333 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4334 cfg80211_get_chandef_type(&ctx->def));
4335 }
4336
4337 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4338 struct ieee80211_chanctx_conf *ctx,
4339 u32 changed)
4340 {
4341 wl1271_debug(DEBUG_MAC80211,
4342 "mac80211 change chanctx %d (type %d) changed 0x%x",
4343 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4344 cfg80211_get_chandef_type(&ctx->def), changed);
4345 }
4346
4347 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4348 struct ieee80211_vif *vif,
4349 struct ieee80211_chanctx_conf *ctx)
4350 {
4351 struct wl1271 *wl = hw->priv;
4352 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4353 int channel = ieee80211_frequency_to_channel(
4354 ctx->def.chan->center_freq);
4355
4356 wl1271_debug(DEBUG_MAC80211,
4357 "mac80211 assign chanctx (role %d) %d (type %d)",
4358 wlvif->role_id, channel, cfg80211_get_chandef_type(&ctx->def));
4359
4360 mutex_lock(&wl->mutex);
4361
4362 wlvif->band = ctx->def.chan->band;
4363 wlvif->channel = channel;
4364 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4365
4366 /* update default rates according to the band */
4367 wl1271_set_band_rate(wl, wlvif);
4368
4369 mutex_unlock(&wl->mutex);
4370
4371 return 0;
4372 }
4373
4374 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4375 struct ieee80211_vif *vif,
4376 struct ieee80211_chanctx_conf *ctx)
4377 {
4378 struct wl1271 *wl = hw->priv;
4379 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4380
4381 wl1271_debug(DEBUG_MAC80211,
4382 "mac80211 unassign chanctx (role %d) %d (type %d)",
4383 wlvif->role_id,
4384 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4385 cfg80211_get_chandef_type(&ctx->def));
4386
4387 wl1271_tx_flush(wl);
4388 }
4389
4390 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4391 struct ieee80211_vif *vif, u16 queue,
4392 const struct ieee80211_tx_queue_params *params)
4393 {
4394 struct wl1271 *wl = hw->priv;
4395 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4396 u8 ps_scheme;
4397 int ret = 0;
4398
4399 mutex_lock(&wl->mutex);
4400
4401 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4402
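/* use U-APSD triggered delivery if mac80211 requested it for this AC, legacy PS otherwise */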
4403 if (params->uapsd)
4404 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4405 else
4406 ps_scheme = CONF_PS_SCHEME_LEGACY;
4407
4408 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4409 goto out;
4410
4411 ret = wl1271_ps_elp_wakeup(wl);
4412 if (ret < 0)
4413 goto out;
4414
4415 /*
4416 * mac80211 configures the txop in units of 32us, but the
4417 * firmware expects microseconds, hence the << 5 below
4418 */
4419 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4420 params->cw_min, params->cw_max,
4421 params->aifs, params->txop << 5);
4422 if (ret < 0)
4423 goto out_sleep;
4424
4425 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4426 CONF_CHANNEL_TYPE_EDCF,
4427 wl1271_tx_get_queue(queue),
4428 ps_scheme, CONF_ACK_POLICY_LEGACY,
4429 0, 0);
4430
4431 out_sleep:
4432 wl1271_ps_elp_sleep(wl);
4433
4434 out:
4435 mutex_unlock(&wl->mutex);
4436
4437 return ret;
4438 }
4439
4440 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4441 struct ieee80211_vif *vif)
4442 {
4443
4444 struct wl1271 *wl = hw->priv;
4445 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4446 u64 mactime = ULLONG_MAX;
4447 int ret;
4448
4449 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4450
4451 mutex_lock(&wl->mutex);
4452
4453 if (unlikely(wl->state != WLCORE_STATE_ON))
4454 goto out;
4455
4456 ret = wl1271_ps_elp_wakeup(wl);
4457 if (ret < 0)
4458 goto out;
4459
4460 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4461 if (ret < 0)
4462 goto out_sleep;
4463
4464 out_sleep:
4465 wl1271_ps_elp_sleep(wl);
4466
4467 out:
4468 mutex_unlock(&wl->mutex);
4469 return mactime;
4470 }
4471
4472 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4473 struct survey_info *survey)
4474 {
4475 struct ieee80211_conf *conf = &hw->conf;
4476
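/* only the current operating channel is reported; no statistics are filled in */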
4477 if (idx != 0)
4478 return -ENOENT;
4479
4480 survey->channel = conf->channel;
4481 survey->filled = 0;
4482 return 0;
4483 }
4484
4485 static int wl1271_allocate_sta(struct wl1271 *wl,
4486 struct wl12xx_vif *wlvif,
4487 struct ieee80211_sta *sta)
4488 {
4489 struct wl1271_station *wl_sta;
4490 int ret;
4491
4493 if (wl->active_sta_count >= AP_MAX_STATIONS) {
4494 wl1271_warning("could not allocate HLID - too many stations");
4495 return -EBUSY;
4496 }
4497
4498 wl_sta = (struct wl1271_station *)sta->drv_priv;
4499 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4500 if (ret < 0) {
4501 wl1271_warning("could not allocate HLID - too many links");
4502 return -EBUSY;
4503 }
4504
4505 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4506 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4507 wl->active_sta_count++;
4508 return 0;
4509 }
4510
4511 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4512 {
4513 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4514 return;
4515
4516 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4517 __clear_bit(hlid, &wl->ap_ps_map);
4518 __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
4519 wl12xx_free_link(wl, wlvif, &hlid);
4520 wl->active_sta_count--;
4521
4522 /*
4523 * rearm the tx watchdog when the last STA is freed - give the FW a
4524 * chance to return STA-buffered packets before complaining.
4525 */
4526 if (wl->active_sta_count == 0)
4527 wl12xx_rearm_tx_watchdog_locked(wl);
4528 }
4529
4530 static int wl12xx_sta_add(struct wl1271 *wl,
4531 struct wl12xx_vif *wlvif,
4532 struct ieee80211_sta *sta)
4533 {
4534 struct wl1271_station *wl_sta;
4535 int ret = 0;
4536 u8 hlid;
4537
4538 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4539
4540 ret = wl1271_allocate_sta(wl, wlvif, sta);
4541 if (ret < 0)
4542 return ret;
4543
4544 wl_sta = (struct wl1271_station *)sta->drv_priv;
4545 hlid = wl_sta->hlid;
4546
4547 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
4548 if (ret < 0)
4549 wl1271_free_sta(wl, wlvif, hlid);
4550
4551 return ret;
4552 }
4553
4554 static int wl12xx_sta_remove(struct wl1271 *wl,
4555 struct wl12xx_vif *wlvif,
4556 struct ieee80211_sta *sta)
4557 {
4558 struct wl1271_station *wl_sta;
4559 int ret = 0, id;
4560
4561 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4562
4563 wl_sta = (struct wl1271_station *)sta->drv_priv;
4564 id = wl_sta->hlid;
4565 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4566 return -EINVAL;
4567
4568 ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
4569 if (ret < 0)
4570 return ret;
4571
4572 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
4573 return ret;
4574 }
4575
4576 static void wlcore_roc_if_possible(struct wl1271 *wl,
4577 struct wl12xx_vif *wlvif)
4578 {
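/* only one ROC may be active at a time - bail out if any role is already ROCing */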
4579 if (find_first_bit(wl->roc_map,
4580 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
4581 return;
4582
4583 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
4584 return;
4585
4586 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
4587 }
4588
4589 static void wlcore_update_inconn_sta(struct wl1271 *wl,
4590 struct wl12xx_vif *wlvif,
4591 struct wl1271_station *wl_sta,
4592 bool in_connection)
4593 {
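/*
 * Track how many stations are currently connecting on this vif:
 * ROC when the first one appears and CROC once the last one is done.
 */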
4594 if (in_connection) {
4595 if (WARN_ON(wl_sta->in_connection))
4596 return;
4597 wl_sta->in_connection = true;
4598 if (!wlvif->inconn_count++)
4599 wlcore_roc_if_possible(wl, wlvif);
4600 } else {
4601 if (!wl_sta->in_connection)
4602 return;
4603
4604 wl_sta->in_connection = false;
4605 wlvif->inconn_count--;
4606 if (WARN_ON(wlvif->inconn_count < 0))
4607 return;
4608
4609 if (!wlvif->inconn_count)
4610 if (test_bit(wlvif->role_id, wl->roc_map))
4611 wl12xx_croc(wl, wlvif->role_id);
4612 }
4613 }
4614
4615 static int wl12xx_update_sta_state(struct wl1271 *wl,
4616 struct wl12xx_vif *wlvif,
4617 struct ieee80211_sta *sta,
4618 enum ieee80211_sta_state old_state,
4619 enum ieee80211_sta_state new_state)
4620 {
4621 struct wl1271_station *wl_sta;
4622 u8 hlid;
4623 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
4624 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
4625 int ret;
4626
4627 wl_sta = (struct wl1271_station *)sta->drv_priv;
4628 hlid = wl_sta->hlid;
4629
4630 /* Add station (AP mode) */
4631 if (is_ap &&
4632 old_state == IEEE80211_STA_NOTEXIST &&
4633 new_state == IEEE80211_STA_NONE) {
4634 ret = wl12xx_sta_add(wl, wlvif, sta);
4635 if (ret)
4636 return ret;
4637
4638 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
4639 }
4640
4641 /* Remove station (AP mode) */
4642 if (is_ap &&
4643 old_state == IEEE80211_STA_NONE &&
4644 new_state == IEEE80211_STA_NOTEXIST) {
4645 /* must not fail */
4646 wl12xx_sta_remove(wl, wlvif, sta);
4647
4648 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4649 }
4650
4651 /* Authorize station (AP mode) */
4652 if (is_ap &&
4653 new_state == IEEE80211_STA_AUTHORIZED) {
4654 ret = wl12xx_cmd_set_peer_state(wl, wlvif, hlid);
4655 if (ret < 0)
4656 return ret;
4657
4658 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
4659 hlid);
4660 if (ret)
4661 return ret;
4662
4663 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4664 }
4665
4666 /* Authorize station */
4667 if (is_sta &&
4668 new_state == IEEE80211_STA_AUTHORIZED) {
4669 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4670 ret = wl12xx_set_authorized(wl, wlvif);
4671 if (ret)
4672 return ret;
4673 }
4674
4675 if (is_sta &&
4676 old_state == IEEE80211_STA_AUTHORIZED &&
4677 new_state == IEEE80211_STA_ASSOC) {
4678 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4679 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
4680 }
4681
4682 /* clear ROCs on failure or authorization */
4683 if (is_sta &&
4684 (new_state == IEEE80211_STA_AUTHORIZED ||
4685 new_state == IEEE80211_STA_NOTEXIST)) {
4686 if (test_bit(wlvif->role_id, wl->roc_map))
4687 wl12xx_croc(wl, wlvif->role_id);
4688 }
4689
4690 if (is_sta &&
4691 old_state == IEEE80211_STA_NOTEXIST &&
4692 new_state == IEEE80211_STA_NONE) {
4693 if (find_first_bit(wl->roc_map,
4694 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
4695 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
4696 wl12xx_roc(wl, wlvif, wlvif->role_id,
4697 wlvif->band, wlvif->channel);
4698 }
4699 }
4700 return 0;
4701 }
4702
4703 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
4704 struct ieee80211_vif *vif,
4705 struct ieee80211_sta *sta,
4706 enum ieee80211_sta_state old_state,
4707 enum ieee80211_sta_state new_state)
4708 {
4709 struct wl1271 *wl = hw->priv;
4710 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4711 int ret;
4712
4713 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
4714 sta->aid, old_state, new_state);
4715
4716 mutex_lock(&wl->mutex);
4717
4718 if (unlikely(wl->state != WLCORE_STATE_ON)) {
4719 ret = -EBUSY;
4720 goto out;
4721 }
4722
4723 ret = wl1271_ps_elp_wakeup(wl);
4724 if (ret < 0)
4725 goto out;
4726
4727 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
4728
4729 wl1271_ps_elp_sleep(wl);
4730 out:
4731 mutex_unlock(&wl->mutex);
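/* transitions to a lower state never report failure, even if the FW command failed */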
4732 if (new_state < old_state)
4733 return 0;
4734 return ret;
4735 }
4736
4737 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
4738 struct ieee80211_vif *vif,
4739 enum ieee80211_ampdu_mlme_action action,
4740 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
4741 u8 buf_size)
4742 {
4743 struct wl1271 *wl = hw->priv;
4744 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4745 int ret;
4746 u8 hlid, *ba_bitmap;
4747
4748 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
4749 tid);
4750
4751 /* sanity check - the fields in FW are only 8bits wide */
4752 if (WARN_ON(tid > 0xFF))
4753 return -ENOTSUPP;
4754
4755 mutex_lock(&wl->mutex);
4756
4757 if (unlikely(wl->state != WLCORE_STATE_ON)) {
4758 ret = -EAGAIN;
4759 goto out;
4760 }
4761
4762 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
4763 hlid = wlvif->sta.hlid;
4764 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
4765 struct wl1271_station *wl_sta;
4766
4767 wl_sta = (struct wl1271_station *)sta->drv_priv;
4768 hlid = wl_sta->hlid;
4769 } else {
4770 ret = -EINVAL;
4771 goto out;
4772 }
4773
4774 ba_bitmap = &wl->links[hlid].ba_bitmap;
4775
4776 ret = wl1271_ps_elp_wakeup(wl);
4777 if (ret < 0)
4778 goto out;
4779
4780 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
4781 tid, action);
4782
4783 switch (action) {
4784 case IEEE80211_AMPDU_RX_START:
4785 if (!wlvif->ba_support || !wlvif->ba_allowed) {
4786 ret = -ENOTSUPP;
4787 break;
4788 }
4789
4790 if (wl->ba_rx_session_count >= RX_BA_MAX_SESSIONS) {
4791 ret = -EBUSY;
4792 wl1271_error("exceeded max RX BA sessions");
4793 break;
4794 }
4795
4796 if (*ba_bitmap & BIT(tid)) {
4797 ret = -EINVAL;
4798 wl1271_error("cannot enable RX BA session on active "
4799 "tid: %d", tid);
4800 break;
4801 }
4802
4803 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
4804 hlid);
4805 if (!ret) {
4806 *ba_bitmap |= BIT(tid);
4807 wl->ba_rx_session_count++;
4808 }
4809 break;
4810
4811 case IEEE80211_AMPDU_RX_STOP:
4812 if (!(*ba_bitmap & BIT(tid))) {
4813 /*
4814 * this happens on reconfig - so only output a debug
4815 * message for now, and don't fail the function.
4816 */
4817 wl1271_debug(DEBUG_MAC80211,
4818 "no active RX BA session on tid: %d",
4819 tid);
4820 ret = 0;
4821 break;
4822 }
4823
4824 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
4825 hlid);
4826 if (!ret) {
4827 *ba_bitmap &= ~BIT(tid);
4828 wl->ba_rx_session_count--;
4829 }
4830 break;
4831
4832 /*
4833 * The BA initiator session is managed by the FW independently.
4834 * Fall through here on purpose for all TX AMPDU commands.
4835 */
4836 case IEEE80211_AMPDU_TX_START:
4837 case IEEE80211_AMPDU_TX_STOP_CONT:
4838 case IEEE80211_AMPDU_TX_STOP_FLUSH:
4839 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
4840 case IEEE80211_AMPDU_TX_OPERATIONAL:
4841 ret = -EINVAL;
4842 break;
4843
4844 default:
4845 wl1271_error("Incorrect ampdu action id=%x\n", action);
4846 ret = -EINVAL;
4847 }
4848
4849 wl1271_ps_elp_sleep(wl);
4850
4851 out:
4852 mutex_unlock(&wl->mutex);
4853
4854 return ret;
4855 }
4856
4857 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
4858 struct ieee80211_vif *vif,
4859 const struct cfg80211_bitrate_mask *mask)
4860 {
4861 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4862 struct wl1271 *wl = hw->priv;
4863 int i, ret = 0;
4864
4865 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
4866 mask->control[NL80211_BAND_2GHZ].legacy,
4867 mask->control[NL80211_BAND_5GHZ].legacy);
4868
4869 mutex_lock(&wl->mutex);
4870
4871 for (i = 0; i < WLCORE_NUM_BANDS; i++)
4872 wlvif->bitrate_masks[i] =
4873 wl1271_tx_enabled_rates_get(wl,
4874 mask->control[i].legacy,
4875 i);
4876
4877 if (unlikely(wl->state != WLCORE_STATE_ON))
4878 goto out;
4879
4880 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4881 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
4882
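/* for a STA that is not yet associated, push the updated rate policy to the FW right away */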
4883 ret = wl1271_ps_elp_wakeup(wl);
4884 if (ret < 0)
4885 goto out;
4886
4887 wl1271_set_band_rate(wl, wlvif);
4888 wlvif->basic_rate =
4889 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4890 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4891
4892 wl1271_ps_elp_sleep(wl);
4893 }
4894 out:
4895 mutex_unlock(&wl->mutex);
4896
4897 return ret;
4898 }
4899
4900 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
4901 struct ieee80211_channel_switch *ch_switch)
4902 {
4903 struct wl1271 *wl = hw->priv;
4904 struct wl12xx_vif *wlvif;
4905 int ret;
4906
4907 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
4908
4909 wl1271_tx_flush(wl);
4910
4911 mutex_lock(&wl->mutex);
4912
4913 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
4914 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4915 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4916 ieee80211_chswitch_done(vif, false);
4917 }
4918 goto out;
4919 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
4920 goto out;
4921 }
4922
4923 ret = wl1271_ps_elp_wakeup(wl);
4924 if (ret < 0)
4925 goto out;
4926
4927 /* TODO: change mac80211 to pass vif as param */
4928 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4929 unsigned long delay_usec;
4930
4931 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
4932 if (ret)
4933 goto out_sleep;
4934
4935 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
4936
4937 /* indicate failure 5 seconds after channel switch time */
4938 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
4939 ch_switch->count;
4940 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
4941 usecs_to_jiffies(delay_usec) +
4942 msecs_to_jiffies(5000));
4943 }
4944
4945 out_sleep:
4946 wl1271_ps_elp_sleep(wl);
4947
4948 out:
4949 mutex_unlock(&wl->mutex);
4950 }
4951
4952 static void wlcore_op_flush(struct ieee80211_hw *hw, bool drop)
4953 {
4954 struct wl1271 *wl = hw->priv;
4955
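/* note: the drop argument is ignored here - we simply flush all pending frames */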
4956 wl1271_tx_flush(wl);
4957 }
4958
4959 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
4960 struct ieee80211_vif *vif,
4961 struct ieee80211_channel *chan,
4962 int duration)
4963 {
4964 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4965 struct wl1271 *wl = hw->priv;
4966 int channel, ret = 0;
4967
4968 channel = ieee80211_frequency_to_channel(chan->center_freq);
4969
4970 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
4971 channel, wlvif->role_id);
4972
4973 mutex_lock(&wl->mutex);
4974
4975 if (unlikely(wl->state != WLCORE_STATE_ON))
4976 goto out;
4977
4978 /* return EBUSY if we can't ROC right now */
4979 if (WARN_ON(wl->roc_vif ||
4980 find_first_bit(wl->roc_map,
4981 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
4982 ret = -EBUSY;
4983 goto out;
4984 }
4985
4986 ret = wl1271_ps_elp_wakeup(wl);
4987 if (ret < 0)
4988 goto out;
4989
4990 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
4991 if (ret < 0)
4992 goto out_sleep;
4993
4994 wl->roc_vif = vif;
4995 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
4996 msecs_to_jiffies(duration));
4997 out_sleep:
4998 wl1271_ps_elp_sleep(wl);
4999 out:
5000 mutex_unlock(&wl->mutex);
5001 return ret;
5002 }
5003
5004 static int __wlcore_roc_completed(struct wl1271 *wl)
5005 {
5006 struct wl12xx_vif *wlvif;
5007 int ret;
5008
5009 /* already completed */
5010 if (unlikely(!wl->roc_vif))
5011 return 0;
5012
5013 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5014
5015 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5016 return -EBUSY;
5017
5018 ret = wl12xx_stop_dev(wl, wlvif);
5019 if (ret < 0)
5020 return ret;
5021
5022 wl->roc_vif = NULL;
5023
5024 return 0;
5025 }
5026
5027 static int wlcore_roc_completed(struct wl1271 *wl)
5028 {
5029 int ret;
5030
5031 wl1271_debug(DEBUG_MAC80211, "roc complete");
5032
5033 mutex_lock(&wl->mutex);
5034
5035 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5036 ret = -EBUSY;
5037 goto out;
5038 }
5039
5040 ret = wl1271_ps_elp_wakeup(wl);
5041 if (ret < 0)
5042 goto out;
5043
5044 ret = __wlcore_roc_completed(wl);
5045
5046 wl1271_ps_elp_sleep(wl);
5047 out:
5048 mutex_unlock(&wl->mutex);
5049
5050 return ret;
5051 }
5052
5053 static void wlcore_roc_complete_work(struct work_struct *work)
5054 {
5055 struct delayed_work *dwork;
5056 struct wl1271 *wl;
5057 int ret;
5058
5059 dwork = container_of(work, struct delayed_work, work);
5060 wl = container_of(dwork, struct wl1271, roc_complete_work);
5061
5062 ret = wlcore_roc_completed(wl);
5063 if (!ret)
5064 ieee80211_remain_on_channel_expired(wl->hw);
5065 }
5066
5067 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5068 {
5069 struct wl1271 *wl = hw->priv;
5070
5071 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5072
5073 /* TODO: per-vif */
5074 wl1271_tx_flush(wl);
5075
5076 /*
5077 * we can't just flush_work here, because it might deadlock
5078 * (as we might get called from the same workqueue)
5079 */
5080 cancel_delayed_work_sync(&wl->roc_complete_work);
5081 wlcore_roc_completed(wl);
5082
5083 return 0;
5084 }
5085
5086 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5087 struct ieee80211_vif *vif,
5088 struct ieee80211_sta *sta,
5089 u32 changed)
5090 {
5091 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5092 struct wl1271 *wl = hw->priv;
5093
5094 wlcore_hw_sta_rc_update(wl, wlvif, sta, changed);
5095 }
5096
5097 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5098 {
5099 struct wl1271 *wl = hw->priv;
5100 bool ret = false;
5101
5102 mutex_lock(&wl->mutex);
5103
5104 if (unlikely(wl->state != WLCORE_STATE_ON))
5105 goto out;
5106
5107 /* packets are considered pending if in the TX queue or the FW */
5108 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5109 out:
5110 mutex_unlock(&wl->mutex);
5111
5112 return ret;
5113 }
5114
5115 /* can't be const, mac80211 writes to this */
5116 static struct ieee80211_rate wl1271_rates[] = {
5117 { .bitrate = 10,
5118 .hw_value = CONF_HW_BIT_RATE_1MBPS,
5119 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5120 { .bitrate = 20,
5121 .hw_value = CONF_HW_BIT_RATE_2MBPS,
5122 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5123 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5124 { .bitrate = 55,
5125 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5126 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5127 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5128 { .bitrate = 110,
5129 .hw_value = CONF_HW_BIT_RATE_11MBPS,
5130 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5131 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5132 { .bitrate = 60,
5133 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5134 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5135 { .bitrate = 90,
5136 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5137 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5138 { .bitrate = 120,
5139 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5140 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5141 { .bitrate = 180,
5142 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5143 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5144 { .bitrate = 240,
5145 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5146 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5147 { .bitrate = 360,
5148 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5149 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5150 { .bitrate = 480,
5151 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5152 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5153 { .bitrate = 540,
5154 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5155 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5156 };
5157
5158 /* can't be const, mac80211 writes to this */
5159 static struct ieee80211_channel wl1271_channels[] = {
5160 { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5161 { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5162 { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5163 { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5164 { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5165 { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5166 { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5167 { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5168 { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5169 { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5170 { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5171 { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5172 { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5173 { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5174 };
5175
5176 /* can't be const, mac80211 writes to this */
5177 static struct ieee80211_supported_band wl1271_band_2ghz = {
5178 .channels = wl1271_channels,
5179 .n_channels = ARRAY_SIZE(wl1271_channels),
5180 .bitrates = wl1271_rates,
5181 .n_bitrates = ARRAY_SIZE(wl1271_rates),
5182 };
5183
5184 /* 5 GHz data rates for WL1273 */
5185 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5186 { .bitrate = 60,
5187 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5188 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5189 { .bitrate = 90,
5190 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5191 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5192 { .bitrate = 120,
5193 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5194 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5195 { .bitrate = 180,
5196 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5197 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5198 { .bitrate = 240,
5199 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5200 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5201 { .bitrate = 360,
5202 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5203 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5204 { .bitrate = 480,
5205 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5206 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5207 { .bitrate = 540,
5208 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5209 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5210 };
5211
5212 /* 5 GHz band channels for WL1273 */
5213 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5214 { .hw_value = 7, .center_freq = 5035, .max_power = WLCORE_MAX_TXPWR },
5215 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5216 { .hw_value = 9, .center_freq = 5045, .max_power = WLCORE_MAX_TXPWR },
5217 { .hw_value = 11, .center_freq = 5055, .max_power = WLCORE_MAX_TXPWR },
5218 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5219 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5220 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5221 { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5222 { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5223 { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5224 { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5225 { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5226 { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5227 { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5228 { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5229 { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5230 { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5231 { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5232 { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5233 { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5234 { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5235 { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5236 { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5237 { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5238 { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5239 { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5240 { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5241 { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5242 { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5243 { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5244 { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5245 { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5246 { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5247 { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
5248 };
5249
5250 static struct ieee80211_supported_band wl1271_band_5ghz = {
5251 .channels = wl1271_channels_5ghz,
5252 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5253 .bitrates = wl1271_rates_5ghz,
5254 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
5255 };
5256
5257 static const struct ieee80211_ops wl1271_ops = {
5258 .start = wl1271_op_start,
5259 .stop = wlcore_op_stop,
5260 .add_interface = wl1271_op_add_interface,
5261 .remove_interface = wl1271_op_remove_interface,
5262 .change_interface = wl12xx_op_change_interface,
5263 #ifdef CONFIG_PM
5264 .suspend = wl1271_op_suspend,
5265 .resume = wl1271_op_resume,
5266 #endif
5267 .config = wl1271_op_config,
5268 .prepare_multicast = wl1271_op_prepare_multicast,
5269 .configure_filter = wl1271_op_configure_filter,
5270 .tx = wl1271_op_tx,
5271 .set_key = wlcore_op_set_key,
5272 .hw_scan = wl1271_op_hw_scan,
5273 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
5274 .sched_scan_start = wl1271_op_sched_scan_start,
5275 .sched_scan_stop = wl1271_op_sched_scan_stop,
5276 .bss_info_changed = wl1271_op_bss_info_changed,
5277 .set_frag_threshold = wl1271_op_set_frag_threshold,
5278 .set_rts_threshold = wl1271_op_set_rts_threshold,
5279 .conf_tx = wl1271_op_conf_tx,
5280 .get_tsf = wl1271_op_get_tsf,
5281 .get_survey = wl1271_op_get_survey,
5282 .sta_state = wl12xx_op_sta_state,
5283 .ampdu_action = wl1271_op_ampdu_action,
5284 .tx_frames_pending = wl1271_tx_frames_pending,
5285 .set_bitrate_mask = wl12xx_set_bitrate_mask,
5286 .channel_switch = wl12xx_op_channel_switch,
5287 .flush = wlcore_op_flush,
5288 .remain_on_channel = wlcore_op_remain_on_channel,
5289 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5290 .add_chanctx = wlcore_op_add_chanctx,
5291 .remove_chanctx = wlcore_op_remove_chanctx,
5292 .change_chanctx = wlcore_op_change_chanctx,
5293 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5294 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5295 .sta_rc_update = wlcore_op_sta_rc_update,
5296 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
5297 };
5298
5299
5300 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
5301 {
5302 u8 idx;
5303
5304 BUG_ON(band >= 2);
5305
5306 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5307 wl1271_error("Illegal RX rate from HW: %d", rate);
5308 return 0;
5309 }
5310
5311 idx = wl->band_rate_to_idx[band][rate];
5312 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5313 wl1271_error("Unsupported RX rate from HW: %d", rate);
5314 return 0;
5315 }
5316
5317 return idx;
5318 }
5319
5320 static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
5321 struct device_attribute *attr,
5322 char *buf)
5323 {
5324 struct wl1271 *wl = dev_get_drvdata(dev);
5325 ssize_t len;
5326
5327 len = PAGE_SIZE;
5328
5329 mutex_lock(&wl->mutex);
5330 len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
5331 wl->sg_enabled);
5332 mutex_unlock(&wl->mutex);
5333
5334 return len;
5335
5336 }
5337
5338 static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
5339 struct device_attribute *attr,
5340 const char *buf, size_t count)
5341 {
5342 struct wl1271 *wl = dev_get_drvdata(dev);
5343 unsigned long res;
5344 int ret;
5345
5346 ret = kstrtoul(buf, 10, &res);
5347 if (ret < 0) {
5348 wl1271_warning("incorrect value written to bt_coex_mode");
5349 return count;
5350 }
5351
5352 mutex_lock(&wl->mutex);
5353
5354 res = !!res;
5355
5356 if (res == wl->sg_enabled)
5357 goto out;
5358
5359 wl->sg_enabled = res;
5360
5361 if (unlikely(wl->state != WLCORE_STATE_ON))
5362 goto out;
5363
5364 ret = wl1271_ps_elp_wakeup(wl);
5365 if (ret < 0)
5366 goto out;
5367
5368 wl1271_acx_sg_enable(wl, wl->sg_enabled);
5369 wl1271_ps_elp_sleep(wl);
5370
5371 out:
5372 mutex_unlock(&wl->mutex);
5373 return count;
5374 }
5375
5376 static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR,
5377 wl1271_sysfs_show_bt_coex_state,
5378 wl1271_sysfs_store_bt_coex_state);
5379
5380 static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
5381 struct device_attribute *attr,
5382 char *buf)
5383 {
5384 struct wl1271 *wl = dev_get_drvdata(dev);
5385 ssize_t len;
5386
5387 len = PAGE_SIZE;
5388
5389 mutex_lock(&wl->mutex);
5390 if (wl->hw_pg_ver >= 0)
5391 len = snprintf(buf, len, "%d\n", wl->hw_pg_ver);
5392 else
5393 len = snprintf(buf, len, "n/a\n");
5394 mutex_unlock(&wl->mutex);
5395
5396 return len;
5397 }
5398
5399 static DEVICE_ATTR(hw_pg_ver, S_IRUGO,
5400 wl1271_sysfs_show_hw_pg_ver, NULL);
5401
5402 static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
5403 struct bin_attribute *bin_attr,
5404 char *buffer, loff_t pos, size_t count)
5405 {
5406 struct device *dev = container_of(kobj, struct device, kobj);
5407 struct wl1271 *wl = dev_get_drvdata(dev);
5408 ssize_t len;
5409 int ret;
5410
5411 ret = mutex_lock_interruptible(&wl->mutex);
5412 if (ret < 0)
5413 return -ERESTARTSYS;
5414
5415 /* Let only one thread read the log at a time, blocking others */
5416 while (wl->fwlog_size == 0) {
5417 DEFINE_WAIT(wait);
5418
5419 prepare_to_wait_exclusive(&wl->fwlog_waitq,
5420 &wait,
5421 TASK_INTERRUPTIBLE);
5422
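/* re-check with the waiter queued, so a wakeup racing with the earlier check is not lost */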
5423 if (wl->fwlog_size != 0) {
5424 finish_wait(&wl->fwlog_waitq, &wait);
5425 break;
5426 }
5427
5428 mutex_unlock(&wl->mutex);
5429
5430 schedule();
5431 finish_wait(&wl->fwlog_waitq, &wait);
5432
5433 if (signal_pending(current))
5434 return -ERESTARTSYS;
5435
5436 ret = mutex_lock_interruptible(&wl->mutex);
5437 if (ret < 0)
5438 return -ERESTARTSYS;
5439 }
5440
5441 /* Check if the fwlog is still valid */
5442 if (wl->fwlog_size < 0) {
5443 mutex_unlock(&wl->mutex);
5444 return 0;
5445 }
5446
5447 /* Seeking is not supported - old logs are not kept. Disregard pos. */
5448 len = min(count, (size_t)wl->fwlog_size);
5449 wl->fwlog_size -= len;
5450 memcpy(buffer, wl->fwlog, len);
5451
5452 /* Make room for new messages */
5453 memmove(wl->fwlog, wl->fwlog + len, wl->fwlog_size);
5454
5455 mutex_unlock(&wl->mutex);
5456
5457 return len;
5458 }
5459
5460 static struct bin_attribute fwlog_attr = {
5461 .attr = {.name = "fwlog", .mode = S_IRUSR},
5462 .read = wl1271_sysfs_read_fwlog,
5463 };
5464
5465 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5466 {
5467 int i;
5468
5469 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5470 oui, nic);
5471
5472 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5473 wl1271_warning("NIC part of the MAC address wraps around!");
5474
5475 for (i = 0; i < wl->num_mac_addr; i++) {
5476 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5477 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5478 wl->addresses[i].addr[2] = (u8) oui;
5479 wl->addresses[i].addr[3] = (u8)(nic >> 16);
5480 wl->addresses[i].addr[4] = (u8)(nic >> 8);
5481 wl->addresses[i].addr[5] = (u8) nic;
5482 nic++;
5483 }
5484
5485 /* we may be short by one address at most */
5486 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5487
5488 /*
5489 * turn on the LAA bit in the first address and use it as
5490 * the last address.
5491 */
5492 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5493 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5494 memcpy(&wl->addresses[idx], &wl->addresses[0],
5495 sizeof(wl->addresses[0]));
5496 /* LAA bit */
5497 wl->addresses[idx].addr[2] |= BIT(1);
5498 }
5499
5500 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5501 wl->hw->wiphy->addresses = wl->addresses;
5502 }
5503
5504 static int wl12xx_get_hw_info(struct wl1271 *wl)
5505 {
5506 int ret;
5507
5508 ret = wl12xx_set_power_on(wl);
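/* power the chip on just long enough to read the chip id, PG version and (if available) the fused MAC address */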
5509 if (ret < 0)
5510 return ret;
5511
5512 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
5513 if (ret < 0)
5514 goto out;
5515
5516 wl->fuse_oui_addr = 0;
5517 wl->fuse_nic_addr = 0;
5518
5519 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
5520 if (ret < 0)
5521 goto out;
5522
5523 if (wl->ops->get_mac)
5524 ret = wl->ops->get_mac(wl);
5525
5526 out:
5527 wl1271_power_off(wl);
5528 return ret;
5529 }
5530
5531 static int wl1271_register_hw(struct wl1271 *wl)
5532 {
5533 int ret;
5534 u32 oui_addr = 0, nic_addr = 0;
5535
5536 if (wl->mac80211_registered)
5537 return 0;
5538
5539 if (wl->nvs_len >= 12) {
5540 /* NOTE: The wl->nvs->nvs element must be first, in
5541 * order to simplify the casting, we assume it is at
5542 * the beginning of the wl->nvs structure.
5543 */
5544 u8 *nvs_ptr = (u8 *)wl->nvs;
5545
5546 oui_addr =
5547 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
5548 nic_addr =
5549 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
5550 }
5551
5552 /* if the MAC address is zeroed in the NVS derive from fuse */
5553 if (oui_addr == 0 && nic_addr == 0) {
5554 oui_addr = wl->fuse_oui_addr;
5555 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
5556 nic_addr = wl->fuse_nic_addr + 1;
5557 }
5558
5559 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
5560
5561 ret = ieee80211_register_hw(wl->hw);
5562 if (ret < 0) {
5563 wl1271_error("unable to register mac80211 hw: %d", ret);
5564 goto out;
5565 }
5566
5567 wl->mac80211_registered = true;
5568
5569 wl1271_debugfs_init(wl);
5570
5571 wl1271_notice("loaded");
5572
5573 out:
5574 return ret;
5575 }
5576
5577 static void wl1271_unregister_hw(struct wl1271 *wl)
5578 {
5579 if (wl->plt)
5580 wl1271_plt_stop(wl);
5581
5582 ieee80211_unregister_hw(wl->hw);
5583 wl->mac80211_registered = false;
5584
5585 }
5586
5587 static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
5588 {
5589 .max = 3,
5590 .types = BIT(NL80211_IFTYPE_STATION),
5591 },
5592 {
5593 .max = 1,
5594 .types = BIT(NL80211_IFTYPE_AP) |
5595 BIT(NL80211_IFTYPE_P2P_GO) |
5596 BIT(NL80211_IFTYPE_P2P_CLIENT),
5597 },
5598 };
5599
5600 static struct ieee80211_iface_combination
5601 wlcore_iface_combinations[] = {
5602 {
5603 .max_interfaces = 3,
5604 .limits = wlcore_iface_limits,
5605 .n_limits = ARRAY_SIZE(wlcore_iface_limits),
5606 },
5607 };
5608
5609 static int wl1271_init_ieee80211(struct wl1271 *wl)
5610 {
5611 int i;
5612 static const u32 cipher_suites[] = {
5613 WLAN_CIPHER_SUITE_WEP40,
5614 WLAN_CIPHER_SUITE_WEP104,
5615 WLAN_CIPHER_SUITE_TKIP,
5616 WLAN_CIPHER_SUITE_CCMP,
5617 WL1271_CIPHER_SUITE_GEM,
5618 };
5619
5620 /* The tx descriptor buffer */
5621 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
5622
5623 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
5624 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
5625
5626 /* channel_change_time is expressed in microseconds */
5627 /* FIXME: find a proper value */
5628 wl->hw->channel_change_time = 10000;
5629 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
5630
5631 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
5632 IEEE80211_HW_SUPPORTS_PS |
5633 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
5634 IEEE80211_HW_SUPPORTS_UAPSD |
5635 IEEE80211_HW_HAS_RATE_CONTROL |
5636 IEEE80211_HW_CONNECTION_MONITOR |
5637 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
5638 IEEE80211_HW_SPECTRUM_MGMT |
5639 IEEE80211_HW_AP_LINK_PS |
5640 IEEE80211_HW_AMPDU_AGGREGATION |
5641 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
5642 IEEE80211_HW_SCAN_WHILE_IDLE |
5643 IEEE80211_HW_QUEUE_CONTROL;
5644
5645 wl->hw->wiphy->cipher_suites = cipher_suites;
5646 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
5647
5648 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
5649 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
5650 BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
5651 wl->hw->wiphy->max_scan_ssids = 1;
5652 wl->hw->wiphy->max_sched_scan_ssids = 16;
5653 wl->hw->wiphy->max_match_sets = 16;
5654 /*
5655 * Maximum length of elements in scanning probe request templates
5656 * should be the maximum length possible for a template, without
5657 * the IEEE80211 header of the template
5658 */
5659 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5660 sizeof(struct ieee80211_header);
5661
5662 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5663 sizeof(struct ieee80211_header);
5664
5665 wl->hw->wiphy->max_remain_on_channel_duration = 5000;
5666
5667 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
5668 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
5669
5670 /* make sure all our channels fit in the scanned_ch bitmask */
5671 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
5672 ARRAY_SIZE(wl1271_channels_5ghz) >
5673 WL1271_MAX_CHANNELS);
5674 /*
5675 * clear channel flags from the previous usage
5676 * and restore max_power & max_antenna_gain values.
5677 */
5678 for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
5679 wl1271_band_2ghz.channels[i].flags = 0;
5680 wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5681 wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
5682 }
5683
5684 for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
5685 wl1271_band_5ghz.channels[i].flags = 0;
5686 wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5687 wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
5688 }
5689
5690 /*
5691 * We keep local copies of the band structs because we need to
5692 * modify them on a per-device basis.
5693 */
5694 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
5695 sizeof(wl1271_band_2ghz));
5696 memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
5697 &wl->ht_cap[IEEE80211_BAND_2GHZ],
5698 sizeof(*wl->ht_cap));
5699 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
5700 sizeof(wl1271_band_5ghz));
5701 memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
5702 &wl->ht_cap[IEEE80211_BAND_5GHZ],
5703 sizeof(*wl->ht_cap));
5704
5705 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5706 &wl->bands[IEEE80211_BAND_2GHZ];
5707 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5708 &wl->bands[IEEE80211_BAND_5GHZ];
5709
5710 /*
5711 * allow 4 queues per mac address we support +
5712 * 1 cab queue per mac + one global offchannel Tx queue
5713 */
5714 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
5715
5716 /* the last queue is the offchannel queue */
5717 wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
5718 wl->hw->max_rates = 1;
5719
5720 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
5721
5722 /* the FW answers probe-requests in AP-mode */
5723 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
5724 wl->hw->wiphy->probe_resp_offload =
5725 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
5726 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
5727 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
5728
5729 /* allowed interface combinations */
5730 wlcore_iface_combinations[0].num_different_channels = wl->num_channels;
5731 wl->hw->wiphy->iface_combinations = wlcore_iface_combinations;
5732 wl->hw->wiphy->n_iface_combinations =
5733 ARRAY_SIZE(wlcore_iface_combinations);
5734
5735 SET_IEEE80211_DEV(wl->hw, wl->dev);
5736
5737 wl->hw->sta_data_size = sizeof(struct wl1271_station);
5738 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
5739
5740 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
5741
5742 return 0;
5743 }
5744
5745 #define WL1271_DEFAULT_CHANNEL 0
5746
5747 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
5748 u32 mbox_size)
5749 {
5750 struct ieee80211_hw *hw;
5751 struct wl1271 *wl;
5752 int i, j, ret;
5753 unsigned int order;
5754
5755 BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
5756
5757 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
5758 if (!hw) {
5759 wl1271_error("could not alloc ieee80211_hw");
5760 ret = -ENOMEM;
5761 goto err_hw_alloc;
5762 }
5763
5764 wl = hw->priv;
5765 memset(wl, 0, sizeof(*wl));
5766
5767 wl->priv = kzalloc(priv_size, GFP_KERNEL);
5768 if (!wl->priv) {
5769 wl1271_error("could not alloc wl priv");
5770 ret = -ENOMEM;
5771 goto err_priv_alloc;
5772 }
5773
5774 INIT_LIST_HEAD(&wl->wlvif_list);
5775
5776 wl->hw = hw;
5777
5778 for (i = 0; i < NUM_TX_QUEUES; i++)
5779 for (j = 0; j < WL12XX_MAX_LINKS; j++)
5780 skb_queue_head_init(&wl->links[j].tx_queue[i]);
5781
5782 skb_queue_head_init(&wl->deferred_rx_queue);
5783 skb_queue_head_init(&wl->deferred_tx_queue);
5784
5785 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
5786 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
5787 INIT_WORK(&wl->tx_work, wl1271_tx_work);
5788 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
5789 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
5790 INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
5791 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
5792
5793 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
5794 if (!wl->freezable_wq) {
5795 ret = -ENOMEM;
5796 goto err_hw;
5797 }
5798
5799 wl->channel = WL1271_DEFAULT_CHANNEL;
5800 wl->rx_counter = 0;
5801 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
5802 wl->band = IEEE80211_BAND_2GHZ;
5803 wl->channel_type = NL80211_CHAN_NO_HT;
5804 wl->flags = 0;
5805 wl->sg_enabled = true;
5806 wl->sleep_auth = WL1271_PSM_ILLEGAL;
5807 wl->recovery_count = 0;
5808 wl->hw_pg_ver = -1;
5809 wl->ap_ps_map = 0;
5810 wl->ap_fw_ps_map = 0;
5811 wl->quirks = 0;
5812 wl->platform_quirks = 0;
5813 wl->system_hlid = WL12XX_SYSTEM_HLID;
5814 wl->active_sta_count = 0;
5815 wl->active_link_count = 0;
5816 wl->fwlog_size = 0;
5817 init_waitqueue_head(&wl->fwlog_waitq);
5818
5819 /* The system link is always allocated */
5820 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
5821
5822 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
5823 for (i = 0; i < wl->num_tx_desc; i++)
5824 wl->tx_frames[i] = NULL;
5825
5826 spin_lock_init(&wl->wl_lock);
5827
5828 wl->state = WLCORE_STATE_OFF;
5829 wl->fw_type = WL12XX_FW_TYPE_NONE;
5830 mutex_init(&wl->mutex);
5831 mutex_init(&wl->flush_mutex);
5832 init_completion(&wl->nvs_loading_complete);
5833
5834 order = get_order(aggr_buf_size);
5835 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
5836 if (!wl->aggr_buf) {
5837 ret = -ENOMEM;
5838 goto err_wq;
5839 }
5840 wl->aggr_buf_size = aggr_buf_size;
5841
5842 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
5843 if (!wl->dummy_packet) {
5844 ret = -ENOMEM;
5845 goto err_aggr;
5846 }
5847
5848 /* Allocate one page for the FW log */
5849 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
5850 if (!wl->fwlog) {
5851 ret = -ENOMEM;
5852 goto err_dummy_packet;
5853 }
5854
5855 wl->mbox_size = mbox_size;
5856 wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
5857 if (!wl->mbox) {
5858 ret = -ENOMEM;
5859 goto err_fwlog;
5860 }
5861
5862 wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
5863 if (!wl->buffer_32) {
5864 ret = -ENOMEM;
5865 goto err_mbox;
5866 }
5867
5868 return hw;
5869
5870 err_mbox:
5871 kfree(wl->mbox);
5872
5873 err_fwlog:
5874 free_page((unsigned long)wl->fwlog);
5875
5876 err_dummy_packet:
5877 dev_kfree_skb(wl->dummy_packet);
5878
5879 err_aggr:
5880 free_pages((unsigned long)wl->aggr_buf, order);
5881
5882 err_wq:
5883 destroy_workqueue(wl->freezable_wq);
5884
5885 err_hw:
5886 wl1271_debugfs_exit(wl);
5887 kfree(wl->priv);
5888
5889 err_priv_alloc:
5890 ieee80211_free_hw(hw);
5891
5892 err_hw_alloc:
5893
5894 return ERR_PTR(ret);
5895 }
5896 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
5897
5898 int wlcore_free_hw(struct wl1271 *wl)
5899 {
5900 /* Unblock any fwlog readers */
5901 mutex_lock(&wl->mutex);
5902 wl->fwlog_size = -1;
5903 wake_up_interruptible_all(&wl->fwlog_waitq);
5904 mutex_unlock(&wl->mutex);
5905
5906 device_remove_bin_file(wl->dev, &fwlog_attr);
5907
5908 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
5909
5910 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
5911 kfree(wl->buffer_32);
5912 kfree(wl->mbox);
5913 free_page((unsigned long)wl->fwlog);
5914 dev_kfree_skb(wl->dummy_packet);
5915 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
5916
5917 wl1271_debugfs_exit(wl);
5918
5919 vfree(wl->fw);
5920 wl->fw = NULL;
5921 wl->fw_type = WL12XX_FW_TYPE_NONE;
5922 kfree(wl->nvs);
5923 wl->nvs = NULL;
5924
5925 kfree(wl->fw_status_1);
5926 kfree(wl->tx_res_if);
5927 destroy_workqueue(wl->freezable_wq);
5928
5929 kfree(wl->priv);
5930 ieee80211_free_hw(wl->hw);
5931
5932 return 0;
5933 }
5934 EXPORT_SYMBOL_GPL(wlcore_free_hw);
5935
5936 static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
5937 {
5938 struct wl1271 *wl = cookie;
5939 unsigned long flags;
5940
5941 wl1271_debug(DEBUG_IRQ, "IRQ");
5942
5943 /* complete the pending ELP wakeup, if any */
5944 spin_lock_irqsave(&wl->wl_lock, flags);
5945 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
5946 if (wl->elp_compl) {
5947 complete(wl->elp_compl);
5948 wl->elp_compl = NULL;
5949 }
5950
5951 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
5952 /* don't enqueue work right now; mark it as pending */
5953 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
5954 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
5955 disable_irq_nosync(wl->irq);
5956 pm_wakeup_event(wl->dev, 0);
5957 spin_unlock_irqrestore(&wl->wl_lock, flags);
5958 return IRQ_HANDLED;
5959 }
5960 spin_unlock_irqrestore(&wl->wl_lock, flags);
5961
5962 return IRQ_WAKE_THREAD;
5963 }
5964
5965 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
5966 {
5967 struct wl1271 *wl = context;
5968 struct platform_device *pdev = wl->pdev;
5969 struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
5970 unsigned long irqflags;
5971 int ret;
5972
5973 if (fw) {
5974 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
5975 if (!wl->nvs) {
5976 wl1271_error("Could not allocate nvs data");
5977 goto out;
5978 }
5979 wl->nvs_len = fw->size;
5980 } else {
5981 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
5982 WL12XX_NVS_NAME);
5983 wl->nvs = NULL;
5984 wl->nvs_len = 0;
5985 }
5986
5987 ret = wl->ops->setup(wl);
5988 if (ret < 0)
5989 goto out_free_nvs;
5990
5991 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
5992
5993 /* adjust some runtime configuration parameters */
5994 wlcore_adjust_conf(wl);
5995
5996 wl->irq = platform_get_irq(pdev, 0);
5997 wl->platform_quirks = pdata->platform_quirks;
5998 wl->if_ops = pdata->ops;
5999
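/* edge-triggered platforms use a rising-edge trigger; otherwise use a high-level trigger with IRQF_ONESHOT for the threaded handler */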
6000 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
6001 irqflags = IRQF_TRIGGER_RISING;
6002 else
6003 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
6004
6005 ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wlcore_irq,
6006 irqflags,
6007 pdev->name, wl);
6008 if (ret < 0) {
6009 wl1271_error("request_irq() failed: %d", ret);
6010 goto out_free_nvs;
6011 }
6012
6013 #ifdef CONFIG_PM
6014 ret = enable_irq_wake(wl->irq);
6015 if (!ret) {
6016 wl->irq_wake_enabled = true;
6017 device_init_wakeup(wl->dev, 1);
6018 if (pdata->pwr_in_suspend) {
6019 wl->hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
6020 wl->hw->wiphy->wowlan.n_patterns =
6021 WL1271_MAX_RX_FILTERS;
6022 wl->hw->wiphy->wowlan.pattern_min_len = 1;
6023 wl->hw->wiphy->wowlan.pattern_max_len =
6024 WL1271_RX_FILTER_MAX_PATTERN_SIZE;
6025 }
6026 }
6027 #endif
6028 disable_irq(wl->irq);
6029
6030 ret = wl12xx_get_hw_info(wl);
6031 if (ret < 0) {
6032 wl1271_error("couldn't get hw info");
6033 goto out_irq;
6034 }
6035
6036 ret = wl->ops->identify_chip(wl);
6037 if (ret < 0)
6038 goto out_irq;
6039
6040 ret = wl1271_init_ieee80211(wl);
6041 if (ret)
6042 goto out_irq;
6043
6044 ret = wl1271_register_hw(wl);
6045 if (ret)
6046 goto out_irq;
6047
6048 /* Create sysfs file to control bt coex state */
6049 ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
6050 if (ret < 0) {
6051 wl1271_error("failed to create sysfs file bt_coex_state");
6052 goto out_unreg;
6053 }
6054
6055 /* Create sysfs file to get HW PG version */
6056 ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
6057 if (ret < 0) {
6058 wl1271_error("failed to create sysfs file hw_pg_ver");
6059 goto out_bt_coex_state;
6060 }
6061
6062 /* Create sysfs file for the FW log */
6063 ret = device_create_bin_file(wl->dev, &fwlog_attr);
6064 if (ret < 0) {
6065 wl1271_error("failed to create sysfs file fwlog");
6066 goto out_hw_pg_ver;
6067 }
6068
6069 wl->initialized = true;
6070 goto out;
6071
6072 out_hw_pg_ver:
6073 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
6074
6075 out_bt_coex_state:
6076 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
6077
6078 out_unreg:
6079 wl1271_unregister_hw(wl);
6080
6081 out_irq:
6082 free_irq(wl->irq, wl);
6083
6084 out_free_nvs:
6085 kfree(wl->nvs);
6086
6087 out:
6088 release_firmware(fw);
6089 complete_all(&wl->nvs_loading_complete);
6090 }
6091
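/*
 * Probe only records the platform device and fires off the asynchronous
 * request for the NVS file; initialization continues in wlcore_nvs_cb()
 * once that request completes (successfully or not).
 */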
int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
{
	int ret;

	if (!wl->ops || !wl->ptable)
		return -EINVAL;

	wl->dev = &pdev->dev;
	wl->pdev = pdev;
	platform_set_drvdata(pdev, wl);

	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				      WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
				      wl, wlcore_nvs_cb);
	if (ret < 0) {
		wl1271_error("request_firmware_nowait failed: %d", ret);
		complete_all(&wl->nvs_loading_complete);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(wlcore_probe);

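/*
 * Counterpart of wlcore_probe(): wait for the NVS callback to finish, then
 * tear down the wakeup/IRQ setup, unregister from mac80211 and free the
 * wlcore device.
 */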
int wlcore_remove(struct platform_device *pdev)
{
	struct wl1271 *wl = platform_get_drvdata(pdev);

	wait_for_completion(&wl->nvs_loading_complete);
	if (!wl->initialized)
		return 0;

	if (wl->irq_wake_enabled) {
		device_init_wakeup(wl->dev, 0);
		disable_irq_wake(wl->irq);
	}
	wl1271_unregister_hw(wl);
	free_irq(wl->irq, wl);
	wlcore_free_hw(wl);

	return 0;
}
EXPORT_SYMBOL_GPL(wlcore_remove);

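/*
 * Illustrative sketch (not part of this file): a chip-specific front-end
 * such as wl12xx or wl18xx is expected to allocate the wlcore device, fill
 * in wl->ops and wl->ptable, and then hand control to the exported
 * wlcore_probe()/wlcore_remove() helpers from its own platform_driver.
 * The wlxxxx_* names below are hypothetical placeholders:
 *
 *	static int wlxxxx_probe(struct platform_device *pdev)
 *	{
 *		struct wl1271 *wl;
 *
 *		wl = ...;			// allocate, set wl->ops and wl->ptable
 *		return wlcore_probe(wl, pdev);	// defers init to wlcore_nvs_cb()
 *	}
 *
 *	static struct platform_driver wlxxxx_driver = {
 *		.probe	= wlxxxx_probe,
 *		.remove	= wlcore_remove,
 *		.driver	= { .name = "wlxxxx" },
 *	};
 *	module_platform_driver(wlxxxx_driver);
 */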
u32 wl12xx_debug_level = DEBUG_NONE;
EXPORT_SYMBOL_GPL(wl12xx_debug_level);
module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug_level, "wl12xx debugging level");

module_param_named(fwlog, fwlog_param, charp, 0);
MODULE_PARM_DESC(fwlog,
		 "FW logger options: continuous, ondemand, dbgpins or disable");

module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");

module_param(no_recovery, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
MODULE_FIRMWARE(WL12XX_NVS_NAME);