3 * This file is part of wl1271
5 * Copyright (C) 2008-2010 Nokia Corporation
7 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 #include <linux/module.h>
26 #include <linux/firmware.h>
27 #include <linux/delay.h>
28 #include <linux/spi/spi.h>
29 #include <linux/crc32.h>
30 #include <linux/etherdevice.h>
31 #include <linux/vmalloc.h>
32 #include <linux/platform_device.h>
33 #include <linux/slab.h>
34 #include <linux/wl12xx.h>
35 #include <linux/sched.h>
36 #include <linux/interrupt.h>
40 #include "wl12xx_80211.h"
54 #define WL1271_BOOT_RETRIES 3
56 #define WL1271_BOOT_RETRIES 3
58 static char *fwlog_param
;
59 static bool bug_on_recovery
;
60 static bool no_recovery
;
62 static void __wl1271_op_remove_interface(struct wl1271
*wl
,
63 struct ieee80211_vif
*vif
,
64 bool reset_tx_queues
);
65 static void wl1271_op_stop(struct ieee80211_hw
*hw
);
66 static void wl1271_free_ap_keys(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
);
68 static int wl12xx_set_authorized(struct wl1271
*wl
,
69 struct wl12xx_vif
*wlvif
)
73 if (WARN_ON(wlvif
->bss_type
!= BSS_TYPE_STA_BSS
))
76 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
79 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT
, &wlvif
->flags
))
82 ret
= wl12xx_cmd_set_peer_state(wl
, wlvif
->sta
.hlid
);
86 wl12xx_croc(wl
, wlvif
->role_id
);
88 wl1271_info("Association completed.");
92 static int wl1271_reg_notify(struct wiphy
*wiphy
,
93 struct regulatory_request
*request
)
95 struct ieee80211_supported_band
*band
;
96 struct ieee80211_channel
*ch
;
99 band
= wiphy
->bands
[IEEE80211_BAND_5GHZ
];
100 for (i
= 0; i
< band
->n_channels
; i
++) {
101 ch
= &band
->channels
[i
];
102 if (ch
->flags
& IEEE80211_CHAN_DISABLED
)
105 if (ch
->flags
& IEEE80211_CHAN_RADAR
)
106 ch
->flags
|= IEEE80211_CHAN_NO_IBSS
|
107 IEEE80211_CHAN_PASSIVE_SCAN
;
114 static int wl1271_set_rx_streaming(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
119 /* we should hold wl->mutex */
120 ret
= wl1271_acx_ps_rx_streaming(wl
, wlvif
, enable
);
125 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
);
127 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
);
133 * this function is being called when the rx_streaming interval
134 * has beed changed or rx_streaming should be disabled
136 int wl1271_recalc_rx_streaming(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
139 int period
= wl
->conf
.rx_streaming
.interval
;
141 /* don't reconfigure if rx_streaming is disabled */
142 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
))
145 /* reconfigure/disable according to new streaming_period */
147 test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) &&
148 (wl
->conf
.rx_streaming
.always
||
149 test_bit(WL1271_FLAG_SOFT_GEMINI
, &wl
->flags
)))
150 ret
= wl1271_set_rx_streaming(wl
, wlvif
, true);
152 ret
= wl1271_set_rx_streaming(wl
, wlvif
, false);
153 /* don't cancel_work_sync since we might deadlock */
154 del_timer_sync(&wlvif
->rx_streaming_timer
);
160 static void wl1271_rx_streaming_enable_work(struct work_struct
*work
)
163 struct wl12xx_vif
*wlvif
= container_of(work
, struct wl12xx_vif
,
164 rx_streaming_enable_work
);
165 struct wl1271
*wl
= wlvif
->wl
;
167 mutex_lock(&wl
->mutex
);
169 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
) ||
170 !test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) ||
171 (!wl
->conf
.rx_streaming
.always
&&
172 !test_bit(WL1271_FLAG_SOFT_GEMINI
, &wl
->flags
)))
175 if (!wl
->conf
.rx_streaming
.interval
)
178 ret
= wl1271_ps_elp_wakeup(wl
);
182 ret
= wl1271_set_rx_streaming(wl
, wlvif
, true);
186 /* stop it after some time of inactivity */
187 mod_timer(&wlvif
->rx_streaming_timer
,
188 jiffies
+ msecs_to_jiffies(wl
->conf
.rx_streaming
.duration
));
191 wl1271_ps_elp_sleep(wl
);
193 mutex_unlock(&wl
->mutex
);
196 static void wl1271_rx_streaming_disable_work(struct work_struct
*work
)
199 struct wl12xx_vif
*wlvif
= container_of(work
, struct wl12xx_vif
,
200 rx_streaming_disable_work
);
201 struct wl1271
*wl
= wlvif
->wl
;
203 mutex_lock(&wl
->mutex
);
205 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
))
208 ret
= wl1271_ps_elp_wakeup(wl
);
212 ret
= wl1271_set_rx_streaming(wl
, wlvif
, false);
217 wl1271_ps_elp_sleep(wl
);
219 mutex_unlock(&wl
->mutex
);
222 static void wl1271_rx_streaming_timer(unsigned long data
)
224 struct wl12xx_vif
*wlvif
= (struct wl12xx_vif
*)data
;
225 struct wl1271
*wl
= wlvif
->wl
;
226 ieee80211_queue_work(wl
->hw
, &wlvif
->rx_streaming_disable_work
);
229 /* wl->mutex must be taken */
230 void wl12xx_rearm_tx_watchdog_locked(struct wl1271
*wl
)
232 /* if the watchdog is not armed, don't do anything */
233 if (wl
->tx_allocated_blocks
== 0)
236 cancel_delayed_work(&wl
->tx_watchdog_work
);
237 ieee80211_queue_delayed_work(wl
->hw
, &wl
->tx_watchdog_work
,
238 msecs_to_jiffies(wl
->conf
.tx
.tx_watchdog_timeout
));
241 static void wl12xx_tx_watchdog_work(struct work_struct
*work
)
243 struct delayed_work
*dwork
;
246 dwork
= container_of(work
, struct delayed_work
, work
);
247 wl
= container_of(dwork
, struct wl1271
, tx_watchdog_work
);
249 mutex_lock(&wl
->mutex
);
251 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
254 /* Tx went out in the meantime - everything is ok */
255 if (unlikely(wl
->tx_allocated_blocks
== 0))
259 * if a ROC is in progress, we might not have any Tx for a long
260 * time (e.g. pending Tx on the non-ROC channels)
262 if (find_first_bit(wl
->roc_map
, WL12XX_MAX_ROLES
) < WL12XX_MAX_ROLES
) {
263 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms due to ROC",
264 wl
->conf
.tx
.tx_watchdog_timeout
);
265 wl12xx_rearm_tx_watchdog_locked(wl
);
270 * if a scan is in progress, we might not have any Tx for a long
273 if (wl
->scan
.state
!= WL1271_SCAN_STATE_IDLE
) {
274 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms due to scan",
275 wl
->conf
.tx
.tx_watchdog_timeout
);
276 wl12xx_rearm_tx_watchdog_locked(wl
);
281 * AP might cache a frame for a long time for a sleeping station,
282 * so rearm the timer if there's an AP interface with stations. If
283 * Tx is genuinely stuck we will most hopefully discover it when all
284 * stations are removed due to inactivity.
286 if (wl
->active_sta_count
) {
287 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms. AP has "
289 wl
->conf
.tx
.tx_watchdog_timeout
,
290 wl
->active_sta_count
);
291 wl12xx_rearm_tx_watchdog_locked(wl
);
295 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
296 wl
->conf
.tx
.tx_watchdog_timeout
);
297 wl12xx_queue_recovery_work(wl
);
300 mutex_unlock(&wl
->mutex
);
303 static void wlcore_adjust_conf(struct wl1271
*wl
)
305 /* Adjust settings according to optional module parameters */
307 if (!strcmp(fwlog_param
, "continuous")) {
308 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_CONTINUOUS
;
309 } else if (!strcmp(fwlog_param
, "ondemand")) {
310 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_ON_DEMAND
;
311 } else if (!strcmp(fwlog_param
, "dbgpins")) {
312 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_CONTINUOUS
;
313 wl
->conf
.fwlog
.output
= WL12XX_FWLOG_OUTPUT_DBG_PINS
;
314 } else if (!strcmp(fwlog_param
, "disable")) {
315 wl
->conf
.fwlog
.mem_blocks
= 0;
316 wl
->conf
.fwlog
.output
= WL12XX_FWLOG_OUTPUT_NONE
;
318 wl1271_error("Unknown fwlog parameter %s", fwlog_param
);
323 static void wl12xx_irq_ps_regulate_link(struct wl1271
*wl
,
324 struct wl12xx_vif
*wlvif
,
327 bool fw_ps
, single_sta
;
329 fw_ps
= test_bit(hlid
, (unsigned long *)&wl
->ap_fw_ps_map
);
330 single_sta
= (wl
->active_sta_count
== 1);
333 * Wake up from high level PS if the STA is asleep with too little
334 * packets in FW or if the STA is awake.
336 if (!fw_ps
|| tx_pkts
< WL1271_PS_STA_MAX_PACKETS
)
337 wl12xx_ps_link_end(wl
, wlvif
, hlid
);
340 * Start high-level PS if the STA is asleep with enough blocks in FW.
341 * Make an exception if this is the only connected station. In this
342 * case FW-memory congestion is not a problem.
344 else if (!single_sta
&& fw_ps
&& tx_pkts
>= WL1271_PS_STA_MAX_PACKETS
)
345 wl12xx_ps_link_start(wl
, wlvif
, hlid
, true);
348 static void wl12xx_irq_update_links_status(struct wl1271
*wl
,
349 struct wl12xx_vif
*wlvif
,
350 struct wl_fw_status_2
*status
)
352 struct wl1271_link
*lnk
;
356 /* TODO: also use link_fast_bitmap here */
358 cur_fw_ps_map
= le32_to_cpu(status
->link_ps_bitmap
);
359 if (wl
->ap_fw_ps_map
!= cur_fw_ps_map
) {
360 wl1271_debug(DEBUG_PSM
,
361 "link ps prev 0x%x cur 0x%x changed 0x%x",
362 wl
->ap_fw_ps_map
, cur_fw_ps_map
,
363 wl
->ap_fw_ps_map
^ cur_fw_ps_map
);
365 wl
->ap_fw_ps_map
= cur_fw_ps_map
;
368 for_each_set_bit(hlid
, wlvif
->ap
.sta_hlid_map
, WL12XX_MAX_LINKS
) {
369 lnk
= &wl
->links
[hlid
];
370 cnt
= status
->counters
.tx_lnk_free_pkts
[hlid
] -
371 lnk
->prev_freed_pkts
;
373 lnk
->prev_freed_pkts
= status
->counters
.tx_lnk_free_pkts
[hlid
];
374 lnk
->allocated_pkts
-= cnt
;
376 wl12xx_irq_ps_regulate_link(wl
, wlvif
, hlid
,
377 lnk
->allocated_pkts
);
381 static int wlcore_fw_status(struct wl1271
*wl
,
382 struct wl_fw_status_1
*status_1
,
383 struct wl_fw_status_2
*status_2
)
385 struct wl12xx_vif
*wlvif
;
387 u32 old_tx_blk_count
= wl
->tx_blocks_available
;
388 int avail
, freed_blocks
;
393 status_len
= WLCORE_FW_STATUS_1_LEN(wl
->num_rx_desc
) +
394 sizeof(*status_2
) + wl
->fw_status_priv_len
;
396 ret
= wlcore_raw_read_data(wl
, REG_RAW_FW_STATUS_ADDR
, status_1
,
401 wl1271_debug(DEBUG_IRQ
, "intr: 0x%x (fw_rx_counter = %d, "
402 "drv_rx_counter = %d, tx_results_counter = %d)",
404 status_1
->fw_rx_counter
,
405 status_1
->drv_rx_counter
,
406 status_1
->tx_results_counter
);
408 for (i
= 0; i
< NUM_TX_QUEUES
; i
++) {
409 /* prevent wrap-around in freed-packets counter */
410 wl
->tx_allocated_pkts
[i
] -=
411 (status_2
->counters
.tx_released_pkts
[i
] -
412 wl
->tx_pkts_freed
[i
]) & 0xff;
414 wl
->tx_pkts_freed
[i
] = status_2
->counters
.tx_released_pkts
[i
];
417 /* prevent wrap-around in total blocks counter */
418 if (likely(wl
->tx_blocks_freed
<=
419 le32_to_cpu(status_2
->total_released_blks
)))
420 freed_blocks
= le32_to_cpu(status_2
->total_released_blks
) -
423 freed_blocks
= 0x100000000LL
- wl
->tx_blocks_freed
+
424 le32_to_cpu(status_2
->total_released_blks
);
426 wl
->tx_blocks_freed
= le32_to_cpu(status_2
->total_released_blks
);
428 wl
->tx_allocated_blocks
-= freed_blocks
;
431 * If the FW freed some blocks:
432 * If we still have allocated blocks - re-arm the timer, Tx is
433 * not stuck. Otherwise, cancel the timer (no Tx currently).
436 if (wl
->tx_allocated_blocks
)
437 wl12xx_rearm_tx_watchdog_locked(wl
);
439 cancel_delayed_work(&wl
->tx_watchdog_work
);
442 avail
= le32_to_cpu(status_2
->tx_total
) - wl
->tx_allocated_blocks
;
445 * The FW might change the total number of TX memblocks before
446 * we get a notification about blocks being released. Thus, the
447 * available blocks calculation might yield a temporary result
448 * which is lower than the actual available blocks. Keeping in
449 * mind that only blocks that were allocated can be moved from
450 * TX to RX, tx_blocks_available should never decrease here.
452 wl
->tx_blocks_available
= max((int)wl
->tx_blocks_available
,
455 /* if more blocks are available now, tx work can be scheduled */
456 if (wl
->tx_blocks_available
> old_tx_blk_count
)
457 clear_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
);
459 /* for AP update num of allocated TX blocks per link and ps status */
460 wl12xx_for_each_wlvif_ap(wl
, wlvif
) {
461 wl12xx_irq_update_links_status(wl
, wlvif
, status_2
);
464 /* update the host-chipset time offset */
466 wl
->time_offset
= (timespec_to_ns(&ts
) >> 10) -
467 (s64
)le32_to_cpu(status_2
->fw_localtime
);
472 static void wl1271_flush_deferred_work(struct wl1271
*wl
)
476 /* Pass all received frames to the network stack */
477 while ((skb
= skb_dequeue(&wl
->deferred_rx_queue
)))
478 ieee80211_rx_ni(wl
->hw
, skb
);
480 /* Return sent skbs to the network stack */
481 while ((skb
= skb_dequeue(&wl
->deferred_tx_queue
)))
482 ieee80211_tx_status_ni(wl
->hw
, skb
);
485 static void wl1271_netstack_work(struct work_struct
*work
)
488 container_of(work
, struct wl1271
, netstack_work
);
491 wl1271_flush_deferred_work(wl
);
492 } while (skb_queue_len(&wl
->deferred_rx_queue
));
495 #define WL1271_IRQ_MAX_LOOPS 256
497 static irqreturn_t
wl1271_irq(int irq
, void *cookie
)
501 int loopcount
= WL1271_IRQ_MAX_LOOPS
;
502 struct wl1271
*wl
= (struct wl1271
*)cookie
;
504 unsigned int defer_count
;
507 /* TX might be handled here, avoid redundant work */
508 set_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
);
509 cancel_work_sync(&wl
->tx_work
);
512 * In case edge triggered interrupt must be used, we cannot iterate
513 * more than once without introducing race conditions with the hardirq.
515 if (wl
->platform_quirks
& WL12XX_PLATFORM_QUIRK_EDGE_IRQ
)
518 mutex_lock(&wl
->mutex
);
520 wl1271_debug(DEBUG_IRQ
, "IRQ work");
522 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
525 ret
= wl1271_ps_elp_wakeup(wl
);
529 while (!done
&& loopcount
--) {
531 * In order to avoid a race with the hardirq, clear the flag
532 * before acknowledging the chip. Since the mutex is held,
533 * wl1271_ps_elp_wakeup cannot be called concurrently.
535 clear_bit(WL1271_FLAG_IRQ_RUNNING
, &wl
->flags
);
536 smp_mb__after_clear_bit();
538 ret
= wlcore_fw_status(wl
, wl
->fw_status_1
, wl
->fw_status_2
);
540 wl12xx_queue_recovery_work(wl
);
544 wlcore_hw_tx_immediate_compl(wl
);
546 intr
= le32_to_cpu(wl
->fw_status_1
->intr
);
547 intr
&= WLCORE_ALL_INTR_MASK
;
553 if (unlikely(intr
& WL1271_ACX_INTR_WATCHDOG
)) {
554 wl1271_error("HW watchdog interrupt received! starting recovery.");
555 wl
->watchdog_recovery
= true;
556 wl12xx_queue_recovery_work(wl
);
558 /* restarting the chip. ignore any other interrupt. */
562 if (unlikely(intr
& WL1271_ACX_SW_INTR_WATCHDOG
)) {
563 wl1271_error("SW watchdog interrupt received! "
564 "starting recovery.");
565 wl
->watchdog_recovery
= true;
566 wl12xx_queue_recovery_work(wl
);
568 /* restarting the chip. ignore any other interrupt. */
572 if (likely(intr
& WL1271_ACX_INTR_DATA
)) {
573 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_DATA");
575 ret
= wlcore_rx(wl
, wl
->fw_status_1
);
577 wl12xx_queue_recovery_work(wl
);
581 /* Check if any tx blocks were freed */
582 spin_lock_irqsave(&wl
->wl_lock
, flags
);
583 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
584 wl1271_tx_total_queue_count(wl
) > 0) {
585 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
587 * In order to avoid starvation of the TX path,
588 * call the work function directly.
590 wl1271_tx_work_locked(wl
);
592 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
595 /* check for tx results */
596 ret
= wlcore_hw_tx_delayed_compl(wl
);
598 wl12xx_queue_recovery_work(wl
);
602 /* Make sure the deferred queues don't get too long */
603 defer_count
= skb_queue_len(&wl
->deferred_tx_queue
) +
604 skb_queue_len(&wl
->deferred_rx_queue
);
605 if (defer_count
> WL1271_DEFERRED_QUEUE_LIMIT
)
606 wl1271_flush_deferred_work(wl
);
609 if (intr
& WL1271_ACX_INTR_EVENT_A
) {
610 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_EVENT_A");
611 ret
= wl1271_event_handle(wl
, 0);
613 wl12xx_queue_recovery_work(wl
);
618 if (intr
& WL1271_ACX_INTR_EVENT_B
) {
619 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_EVENT_B");
620 ret
= wl1271_event_handle(wl
, 1);
622 wl12xx_queue_recovery_work(wl
);
627 if (intr
& WL1271_ACX_INTR_INIT_COMPLETE
)
628 wl1271_debug(DEBUG_IRQ
,
629 "WL1271_ACX_INTR_INIT_COMPLETE");
631 if (intr
& WL1271_ACX_INTR_HW_AVAILABLE
)
632 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_HW_AVAILABLE");
635 wl1271_ps_elp_sleep(wl
);
638 spin_lock_irqsave(&wl
->wl_lock
, flags
);
639 /* In case TX was not handled here, queue TX work */
640 clear_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
);
641 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
642 wl1271_tx_total_queue_count(wl
) > 0)
643 ieee80211_queue_work(wl
->hw
, &wl
->tx_work
);
644 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
646 mutex_unlock(&wl
->mutex
);
651 struct vif_counter_data
{
654 struct ieee80211_vif
*cur_vif
;
655 bool cur_vif_running
;
658 static void wl12xx_vif_count_iter(void *data
, u8
*mac
,
659 struct ieee80211_vif
*vif
)
661 struct vif_counter_data
*counter
= data
;
664 if (counter
->cur_vif
== vif
)
665 counter
->cur_vif_running
= true;
668 /* caller must not hold wl->mutex, as it might deadlock */
669 static void wl12xx_get_vif_count(struct ieee80211_hw
*hw
,
670 struct ieee80211_vif
*cur_vif
,
671 struct vif_counter_data
*data
)
673 memset(data
, 0, sizeof(*data
));
674 data
->cur_vif
= cur_vif
;
676 ieee80211_iterate_active_interfaces(hw
,
677 wl12xx_vif_count_iter
, data
);
680 static int wl12xx_fetch_firmware(struct wl1271
*wl
, bool plt
)
682 const struct firmware
*fw
;
684 enum wl12xx_fw_type fw_type
;
688 fw_type
= WL12XX_FW_TYPE_PLT
;
689 fw_name
= wl
->plt_fw_name
;
692 * we can't call wl12xx_get_vif_count() here because
693 * wl->mutex is taken, so use the cached last_vif_count value
695 if (wl
->last_vif_count
> 1) {
696 fw_type
= WL12XX_FW_TYPE_MULTI
;
697 fw_name
= wl
->mr_fw_name
;
699 fw_type
= WL12XX_FW_TYPE_NORMAL
;
700 fw_name
= wl
->sr_fw_name
;
704 if (wl
->fw_type
== fw_type
)
707 wl1271_debug(DEBUG_BOOT
, "booting firmware %s", fw_name
);
709 ret
= request_firmware(&fw
, fw_name
, wl
->dev
);
712 wl1271_error("could not get firmware %s: %d", fw_name
, ret
);
717 wl1271_error("firmware size is not multiple of 32 bits: %zu",
724 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
725 wl
->fw_len
= fw
->size
;
726 wl
->fw
= vmalloc(wl
->fw_len
);
729 wl1271_error("could not allocate memory for the firmware");
734 memcpy(wl
->fw
, fw
->data
, wl
->fw_len
);
736 wl
->fw_type
= fw_type
;
738 release_firmware(fw
);
743 static void wl1271_fetch_nvs(struct wl1271
*wl
)
745 const struct firmware
*fw
;
748 ret
= request_firmware(&fw
, WL12XX_NVS_NAME
, wl
->dev
);
751 wl1271_debug(DEBUG_BOOT
, "could not get nvs file %s: %d",
752 WL12XX_NVS_NAME
, ret
);
756 wl
->nvs
= kmemdup(fw
->data
, fw
->size
, GFP_KERNEL
);
759 wl1271_error("could not allocate memory for the nvs file");
763 wl
->nvs_len
= fw
->size
;
766 release_firmware(fw
);
769 void wl12xx_queue_recovery_work(struct wl1271
*wl
)
771 /* Avoid a recursive recovery */
772 if (!test_and_set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
)) {
773 wlcore_disable_interrupts_nosync(wl
);
774 ieee80211_queue_work(wl
->hw
, &wl
->recovery_work
);
778 size_t wl12xx_copy_fwlog(struct wl1271
*wl
, u8
*memblock
, size_t maxlen
)
782 /* The FW log is a length-value list, find where the log end */
783 while (len
< maxlen
) {
784 if (memblock
[len
] == 0)
786 if (len
+ memblock
[len
] + 1 > maxlen
)
788 len
+= memblock
[len
] + 1;
791 /* Make sure we have enough room */
792 len
= min(len
, (size_t)(PAGE_SIZE
- wl
->fwlog_size
));
794 /* Fill the FW log file, consumed by the sysfs fwlog entry */
795 memcpy(wl
->fwlog
+ wl
->fwlog_size
, memblock
, len
);
796 wl
->fwlog_size
+= len
;
801 #define WLCORE_FW_LOG_END 0x2000000
803 static void wl12xx_read_fwlog_panic(struct wl1271
*wl
)
811 if ((wl
->quirks
& WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED
) ||
812 (wl
->conf
.fwlog
.mem_blocks
== 0))
815 wl1271_info("Reading FW panic log");
817 block
= kmalloc(WL12XX_HW_BLOCK_SIZE
, GFP_KERNEL
);
822 * Make sure the chip is awake and the logger isn't active.
823 * Do not send a stop fwlog command if the fw is hanged.
825 if (wl1271_ps_elp_wakeup(wl
))
827 if (!wl
->watchdog_recovery
)
828 wl12xx_cmd_stop_fwlog(wl
);
830 /* Read the first memory block address */
831 ret
= wlcore_fw_status(wl
, wl
->fw_status_1
, wl
->fw_status_2
);
835 addr
= le32_to_cpu(wl
->fw_status_2
->log_start_addr
);
839 if (wl
->conf
.fwlog
.mode
== WL12XX_FWLOG_CONTINUOUS
) {
840 offset
= sizeof(addr
) + sizeof(struct wl1271_rx_descriptor
);
841 end_of_log
= WLCORE_FW_LOG_END
;
843 offset
= sizeof(addr
);
847 /* Traverse the memory blocks linked list */
849 memset(block
, 0, WL12XX_HW_BLOCK_SIZE
);
850 wl1271_read_hwaddr(wl
, addr
, block
, WL12XX_HW_BLOCK_SIZE
,
854 * Memory blocks are linked to one another. The first 4 bytes
855 * of each memory block hold the hardware address of the next
856 * one. The last memory block points to the first one in
857 * on demand mode and is equal to 0x2000000 in continuous mode.
859 addr
= le32_to_cpup((__le32
*)block
);
860 if (!wl12xx_copy_fwlog(wl
, block
+ offset
,
861 WL12XX_HW_BLOCK_SIZE
- offset
))
863 } while (addr
&& (addr
!= end_of_log
));
865 wake_up_interruptible(&wl
->fwlog_waitq
);
871 static void wl1271_recovery_work(struct work_struct
*work
)
874 container_of(work
, struct wl1271
, recovery_work
);
875 struct wl12xx_vif
*wlvif
;
876 struct ieee80211_vif
*vif
;
878 mutex_lock(&wl
->mutex
);
880 if (wl
->state
!= WL1271_STATE_ON
|| wl
->plt
)
883 wl12xx_read_fwlog_panic(wl
);
885 /* change partitions momentarily so we can read the FW pc */
886 wlcore_set_partition(wl
, &wl
->ptable
[PART_BOOT
]);
887 wl1271_info("Hardware recovery in progress. FW ver: %s pc: 0x%x "
890 wlcore_read_reg(wl
, REG_PC_ON_RECOVERY
),
891 wlcore_read_reg(wl
, REG_INTERRUPT_NO_CLEAR
));
892 wlcore_set_partition(wl
, &wl
->ptable
[PART_WORK
]);
894 BUG_ON(bug_on_recovery
&&
895 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
));
898 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
899 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
);
904 * Advance security sequence number to overcome potential progress
905 * in the firmware during recovery. This doens't hurt if the network is
908 wl12xx_for_each_wlvif(wl
, wlvif
) {
909 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) ||
910 test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
))
911 wlvif
->tx_security_seq
+=
912 WL1271_TX_SQN_POST_RECOVERY_PADDING
;
915 /* Prevent spurious TX during FW restart */
916 wlcore_stop_queues(wl
, WLCORE_QUEUE_STOP_REASON_FW_RESTART
);
918 if (wl
->sched_scanning
) {
919 ieee80211_sched_scan_stopped(wl
->hw
);
920 wl
->sched_scanning
= false;
923 /* reboot the chipset */
924 while (!list_empty(&wl
->wlvif_list
)) {
925 wlvif
= list_first_entry(&wl
->wlvif_list
,
926 struct wl12xx_vif
, list
);
927 vif
= wl12xx_wlvif_to_vif(wlvif
);
928 __wl1271_op_remove_interface(wl
, vif
, false);
930 wl
->watchdog_recovery
= false;
931 mutex_unlock(&wl
->mutex
);
932 wl1271_op_stop(wl
->hw
);
934 ieee80211_restart_hw(wl
->hw
);
937 * Its safe to enable TX now - the queues are stopped after a request
940 wlcore_wake_queues(wl
, WLCORE_QUEUE_STOP_REASON_FW_RESTART
);
943 wl
->watchdog_recovery
= false;
944 mutex_unlock(&wl
->mutex
);
947 static void wl1271_fw_wakeup(struct wl1271
*wl
)
949 wl1271_raw_write32(wl
, HW_ACCESS_ELP_CTRL_REG
, ELPCTRL_WAKE_UP
);
952 static int wl1271_setup(struct wl1271
*wl
)
954 wl
->fw_status_1
= kmalloc(WLCORE_FW_STATUS_1_LEN(wl
->num_rx_desc
) +
955 sizeof(*wl
->fw_status_2
) +
956 wl
->fw_status_priv_len
, GFP_KERNEL
);
957 if (!wl
->fw_status_1
)
960 wl
->fw_status_2
= (struct wl_fw_status_2
*)
961 (((u8
*) wl
->fw_status_1
) +
962 WLCORE_FW_STATUS_1_LEN(wl
->num_rx_desc
));
964 wl
->tx_res_if
= kmalloc(sizeof(*wl
->tx_res_if
), GFP_KERNEL
);
965 if (!wl
->tx_res_if
) {
966 kfree(wl
->fw_status_1
);
973 static int wl12xx_set_power_on(struct wl1271
*wl
)
977 msleep(WL1271_PRE_POWER_ON_SLEEP
);
978 ret
= wl1271_power_on(wl
);
981 msleep(WL1271_POWER_ON_SLEEP
);
985 wlcore_set_partition(wl
, &wl
->ptable
[PART_BOOT
]);
987 /* ELP module wake up */
988 wl1271_fw_wakeup(wl
);
994 static int wl12xx_chip_wakeup(struct wl1271
*wl
, bool plt
)
998 ret
= wl12xx_set_power_on(wl
);
1003 * For wl127x based devices we could use the default block
1004 * size (512 bytes), but due to a bug in the sdio driver, we
1005 * need to set it explicitly after the chip is powered on. To
1006 * simplify the code and since the performance impact is
1007 * negligible, we use the same block size for all different
1010 * Check if the bus supports blocksize alignment and, if it
1011 * doesn't, make sure we don't have the quirk.
1013 if (!wl1271_set_block_size(wl
))
1014 wl
->quirks
&= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN
;
1016 /* TODO: make sure the lower driver has set things up correctly */
1018 ret
= wl1271_setup(wl
);
1022 ret
= wl12xx_fetch_firmware(wl
, plt
);
1030 int wl1271_plt_start(struct wl1271
*wl
)
1032 int retries
= WL1271_BOOT_RETRIES
;
1033 struct wiphy
*wiphy
= wl
->hw
->wiphy
;
1036 mutex_lock(&wl
->mutex
);
1038 wl1271_notice("power up");
1040 if (wl
->state
!= WL1271_STATE_OFF
) {
1041 wl1271_error("cannot go into PLT state because not "
1042 "in off state: %d", wl
->state
);
1049 ret
= wl12xx_chip_wakeup(wl
, true);
1053 ret
= wl
->ops
->plt_init(wl
);
1058 wl
->state
= WL1271_STATE_ON
;
1059 wl1271_notice("firmware booted in PLT mode (%s)",
1060 wl
->chip
.fw_ver_str
);
1062 /* update hw/fw version info in wiphy struct */
1063 wiphy
->hw_version
= wl
->chip
.id
;
1064 strncpy(wiphy
->fw_version
, wl
->chip
.fw_ver_str
,
1065 sizeof(wiphy
->fw_version
));
1070 wl1271_power_off(wl
);
1073 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1074 WL1271_BOOT_RETRIES
);
1076 mutex_unlock(&wl
->mutex
);
1081 int wl1271_plt_stop(struct wl1271
*wl
)
1085 wl1271_notice("power down");
1088 * Interrupts must be disabled before setting the state to OFF.
1089 * Otherwise, the interrupt handler might be called and exit without
1090 * reading the interrupt status.
1092 wlcore_disable_interrupts(wl
);
1093 mutex_lock(&wl
->mutex
);
1095 mutex_unlock(&wl
->mutex
);
1098 * This will not necessarily enable interrupts as interrupts
1099 * may have been disabled when op_stop was called. It will,
1100 * however, balance the above call to disable_interrupts().
1102 wlcore_enable_interrupts(wl
);
1104 wl1271_error("cannot power down because not in PLT "
1105 "state: %d", wl
->state
);
1110 mutex_unlock(&wl
->mutex
);
1112 wl1271_flush_deferred_work(wl
);
1113 cancel_work_sync(&wl
->netstack_work
);
1114 cancel_work_sync(&wl
->recovery_work
);
1115 cancel_delayed_work_sync(&wl
->elp_work
);
1116 cancel_delayed_work_sync(&wl
->tx_watchdog_work
);
1117 cancel_delayed_work_sync(&wl
->connection_loss_work
);
1119 mutex_lock(&wl
->mutex
);
1120 wl1271_power_off(wl
);
1122 wl
->sleep_auth
= WL1271_PSM_ILLEGAL
;
1123 wl
->state
= WL1271_STATE_OFF
;
1126 mutex_unlock(&wl
->mutex
);
1132 static void wl1271_op_tx(struct ieee80211_hw
*hw
, struct sk_buff
*skb
)
1134 struct wl1271
*wl
= hw
->priv
;
1135 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(skb
);
1136 struct ieee80211_vif
*vif
= info
->control
.vif
;
1137 struct wl12xx_vif
*wlvif
= NULL
;
1138 unsigned long flags
;
1143 wlvif
= wl12xx_vif_to_data(vif
);
1145 mapping
= skb_get_queue_mapping(skb
);
1146 q
= wl1271_tx_get_queue(mapping
);
1148 hlid
= wl12xx_tx_get_hlid(wl
, wlvif
, skb
);
1150 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1153 * drop the packet if the link is invalid or the queue is stopped
1154 * for any reason but watermark. Watermark is a "soft"-stop so we
1155 * allow these packets through.
1157 if (hlid
== WL12XX_INVALID_LINK_ID
||
1158 (wlvif
&& !test_bit(hlid
, wlvif
->links_map
)) ||
1159 (wlcore_is_queue_stopped(wl
, q
) &&
1160 !wlcore_is_queue_stopped_by_reason(wl
, q
,
1161 WLCORE_QUEUE_STOP_REASON_WATERMARK
))) {
1162 wl1271_debug(DEBUG_TX
, "DROP skb hlid %d q %d", hlid
, q
);
1163 ieee80211_free_txskb(hw
, skb
);
1167 wl1271_debug(DEBUG_TX
, "queue skb hlid %d q %d len %d",
1169 skb_queue_tail(&wl
->links
[hlid
].tx_queue
[q
], skb
);
1171 wl
->tx_queue_count
[q
]++;
1174 * The workqueue is slow to process the tx_queue and we need stop
1175 * the queue here, otherwise the queue will get too long.
1177 if (wl
->tx_queue_count
[q
] >= WL1271_TX_QUEUE_HIGH_WATERMARK
) {
1178 wl1271_debug(DEBUG_TX
, "op_tx: stopping queues for q %d", q
);
1179 wlcore_stop_queue_locked(wl
, q
,
1180 WLCORE_QUEUE_STOP_REASON_WATERMARK
);
1184 * The chip specific setup must run before the first TX packet -
1185 * before that, the tx_work will not be initialized!
1188 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
1189 !test_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
))
1190 ieee80211_queue_work(wl
->hw
, &wl
->tx_work
);
1193 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1196 int wl1271_tx_dummy_packet(struct wl1271
*wl
)
1198 unsigned long flags
;
1201 /* no need to queue a new dummy packet if one is already pending */
1202 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING
, &wl
->flags
))
1205 q
= wl1271_tx_get_queue(skb_get_queue_mapping(wl
->dummy_packet
));
1207 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1208 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING
, &wl
->flags
);
1209 wl
->tx_queue_count
[q
]++;
1210 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1212 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1213 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
))
1214 wl1271_tx_work_locked(wl
);
1217 * If the FW TX is busy, TX work will be scheduled by the threaded
1218 * interrupt handler function
1224 * The size of the dummy packet should be at least 1400 bytes. However, in
1225 * order to minimize the number of bus transactions, aligning it to 512 bytes
1226 * boundaries could be beneficial, performance wise
1228 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1230 static struct sk_buff
*wl12xx_alloc_dummy_packet(struct wl1271
*wl
)
1232 struct sk_buff
*skb
;
1233 struct ieee80211_hdr_3addr
*hdr
;
1234 unsigned int dummy_packet_size
;
1236 dummy_packet_size
= TOTAL_TX_DUMMY_PACKET_SIZE
-
1237 sizeof(struct wl1271_tx_hw_descr
) - sizeof(*hdr
);
1239 skb
= dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE
);
1241 wl1271_warning("Failed to allocate a dummy packet skb");
1245 skb_reserve(skb
, sizeof(struct wl1271_tx_hw_descr
));
1247 hdr
= (struct ieee80211_hdr_3addr
*) skb_put(skb
, sizeof(*hdr
));
1248 memset(hdr
, 0, sizeof(*hdr
));
1249 hdr
->frame_control
= cpu_to_le16(IEEE80211_FTYPE_DATA
|
1250 IEEE80211_STYPE_NULLFUNC
|
1251 IEEE80211_FCTL_TODS
);
1253 memset(skb_put(skb
, dummy_packet_size
), 0, dummy_packet_size
);
1255 /* Dummy packets require the TID to be management */
1256 skb
->priority
= WL1271_TID_MGMT
;
1258 /* Initialize all fields that might be used */
1259 skb_set_queue_mapping(skb
, 0);
1260 memset(IEEE80211_SKB_CB(skb
), 0, sizeof(struct ieee80211_tx_info
));
1268 wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern
*p
)
1270 int num_fields
= 0, in_field
= 0, fields_size
= 0;
1271 int i
, pattern_len
= 0;
1274 wl1271_warning("No mask in WoWLAN pattern");
1279 * The pattern is broken up into segments of bytes at different offsets
1280 * that need to be checked by the FW filter. Each segment is called
1281 * a field in the FW API. We verify that the total number of fields
1282 * required for this pattern won't exceed FW limits (8)
1283 * as well as the total fields buffer won't exceed the FW limit.
1284 * Note that if there's a pattern which crosses Ethernet/IP header
1285 * boundary a new field is required.
1287 for (i
= 0; i
< p
->pattern_len
; i
++) {
1288 if (test_bit(i
, (unsigned long *)p
->mask
)) {
1293 if (i
== WL1271_RX_FILTER_ETH_HEADER_SIZE
) {
1295 fields_size
+= pattern_len
+
1296 RX_FILTER_FIELD_OVERHEAD
;
1304 fields_size
+= pattern_len
+
1305 RX_FILTER_FIELD_OVERHEAD
;
1312 fields_size
+= pattern_len
+ RX_FILTER_FIELD_OVERHEAD
;
1316 if (num_fields
> WL1271_RX_FILTER_MAX_FIELDS
) {
1317 wl1271_warning("RX Filter too complex. Too many segments");
1321 if (fields_size
> WL1271_RX_FILTER_MAX_FIELDS_SIZE
) {
1322 wl1271_warning("RX filter pattern is too big");
1329 struct wl12xx_rx_filter
*wl1271_rx_filter_alloc(void)
1331 return kzalloc(sizeof(struct wl12xx_rx_filter
), GFP_KERNEL
);
1334 void wl1271_rx_filter_free(struct wl12xx_rx_filter
*filter
)
1341 for (i
= 0; i
< filter
->num_fields
; i
++)
1342 kfree(filter
->fields
[i
].pattern
);
1347 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter
*filter
,
1348 u16 offset
, u8 flags
,
1349 u8
*pattern
, u8 len
)
1351 struct wl12xx_rx_filter_field
*field
;
1353 if (filter
->num_fields
== WL1271_RX_FILTER_MAX_FIELDS
) {
1354 wl1271_warning("Max fields per RX filter. can't alloc another");
1358 field
= &filter
->fields
[filter
->num_fields
];
1360 field
->pattern
= kzalloc(len
, GFP_KERNEL
);
1361 if (!field
->pattern
) {
1362 wl1271_warning("Failed to allocate RX filter pattern");
1366 filter
->num_fields
++;
1368 field
->offset
= cpu_to_le16(offset
);
1369 field
->flags
= flags
;
1371 memcpy(field
->pattern
, pattern
, len
);
1376 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter
*filter
)
1378 int i
, fields_size
= 0;
1380 for (i
= 0; i
< filter
->num_fields
; i
++)
1381 fields_size
+= filter
->fields
[i
].len
+
1382 sizeof(struct wl12xx_rx_filter_field
) -
1388 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter
*filter
,
1392 struct wl12xx_rx_filter_field
*field
;
1394 for (i
= 0; i
< filter
->num_fields
; i
++) {
1395 field
= (struct wl12xx_rx_filter_field
*)buf
;
1397 field
->offset
= filter
->fields
[i
].offset
;
1398 field
->flags
= filter
->fields
[i
].flags
;
1399 field
->len
= filter
->fields
[i
].len
;
1401 memcpy(&field
->pattern
, filter
->fields
[i
].pattern
, field
->len
);
1402 buf
+= sizeof(struct wl12xx_rx_filter_field
) -
1403 sizeof(u8
*) + field
->len
;
1408 * Allocates an RX filter returned through f
1409 * which needs to be freed using rx_filter_free()
1411 static int wl1271_convert_wowlan_pattern_to_rx_filter(
1412 struct cfg80211_wowlan_trig_pkt_pattern
*p
,
1413 struct wl12xx_rx_filter
**f
)
1416 struct wl12xx_rx_filter
*filter
;
1420 filter
= wl1271_rx_filter_alloc();
1422 wl1271_warning("Failed to alloc rx filter");
1428 while (i
< p
->pattern_len
) {
1429 if (!test_bit(i
, (unsigned long *)p
->mask
)) {
1434 for (j
= i
; j
< p
->pattern_len
; j
++) {
1435 if (!test_bit(j
, (unsigned long *)p
->mask
))
1438 if (i
< WL1271_RX_FILTER_ETH_HEADER_SIZE
&&
1439 j
>= WL1271_RX_FILTER_ETH_HEADER_SIZE
)
1443 if (i
< WL1271_RX_FILTER_ETH_HEADER_SIZE
) {
1445 flags
= WL1271_RX_FILTER_FLAG_ETHERNET_HEADER
;
1447 offset
= i
- WL1271_RX_FILTER_ETH_HEADER_SIZE
;
1448 flags
= WL1271_RX_FILTER_FLAG_IP_HEADER
;
1453 ret
= wl1271_rx_filter_alloc_field(filter
,
1456 &p
->pattern
[i
], len
);
1463 filter
->action
= FILTER_SIGNAL
;
1469 wl1271_rx_filter_free(filter
);
1475 static int wl1271_configure_wowlan(struct wl1271
*wl
,
1476 struct cfg80211_wowlan
*wow
)
1480 if (!wow
|| wow
->any
|| !wow
->n_patterns
) {
1481 wl1271_acx_default_rx_filter_enable(wl
, 0, FILTER_SIGNAL
);
1482 wl1271_rx_filter_clear_all(wl
);
1486 if (WARN_ON(wow
->n_patterns
> WL1271_MAX_RX_FILTERS
))
1489 /* Validate all incoming patterns before clearing current FW state */
1490 for (i
= 0; i
< wow
->n_patterns
; i
++) {
1491 ret
= wl1271_validate_wowlan_pattern(&wow
->patterns
[i
]);
1493 wl1271_warning("Bad wowlan pattern %d", i
);
1498 wl1271_acx_default_rx_filter_enable(wl
, 0, FILTER_SIGNAL
);
1499 wl1271_rx_filter_clear_all(wl
);
1501 /* Translate WoWLAN patterns into filters */
1502 for (i
= 0; i
< wow
->n_patterns
; i
++) {
1503 struct cfg80211_wowlan_trig_pkt_pattern
*p
;
1504 struct wl12xx_rx_filter
*filter
= NULL
;
1506 p
= &wow
->patterns
[i
];
1508 ret
= wl1271_convert_wowlan_pattern_to_rx_filter(p
, &filter
);
1510 wl1271_warning("Failed to create an RX filter from "
1511 "wowlan pattern %d", i
);
1515 ret
= wl1271_rx_filter_enable(wl
, i
, 1, filter
);
1517 wl1271_rx_filter_free(filter
);
1522 ret
= wl1271_acx_default_rx_filter_enable(wl
, 1, FILTER_DROP
);
1528 static int wl1271_configure_suspend_sta(struct wl1271
*wl
,
1529 struct wl12xx_vif
*wlvif
,
1530 struct cfg80211_wowlan
*wow
)
1534 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
1537 ret
= wl1271_ps_elp_wakeup(wl
);
1541 wl1271_configure_wowlan(wl
, wow
);
1542 ret
= wl1271_acx_wake_up_conditions(wl
, wlvif
,
1543 wl
->conf
.conn
.suspend_wake_up_event
,
1544 wl
->conf
.conn
.suspend_listen_interval
);
1547 wl1271_error("suspend: set wake up conditions failed: %d", ret
);
1549 wl1271_ps_elp_sleep(wl
);
1556 static int wl1271_configure_suspend_ap(struct wl1271
*wl
,
1557 struct wl12xx_vif
*wlvif
)
1561 if (!test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
))
1564 ret
= wl1271_ps_elp_wakeup(wl
);
1568 ret
= wl1271_acx_beacon_filter_opt(wl
, wlvif
, true);
1570 wl1271_ps_elp_sleep(wl
);
1576 static int wl1271_configure_suspend(struct wl1271
*wl
,
1577 struct wl12xx_vif
*wlvif
,
1578 struct cfg80211_wowlan
*wow
)
1580 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
)
1581 return wl1271_configure_suspend_sta(wl
, wlvif
, wow
);
1582 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
1583 return wl1271_configure_suspend_ap(wl
, wlvif
);
1587 static void wl1271_configure_resume(struct wl1271
*wl
,
1588 struct wl12xx_vif
*wlvif
)
1591 bool is_ap
= wlvif
->bss_type
== BSS_TYPE_AP_BSS
;
1592 bool is_sta
= wlvif
->bss_type
== BSS_TYPE_STA_BSS
;
1594 if ((!is_ap
) && (!is_sta
))
1597 ret
= wl1271_ps_elp_wakeup(wl
);
1602 wl1271_configure_wowlan(wl
, NULL
);
1604 ret
= wl1271_acx_wake_up_conditions(wl
, wlvif
,
1605 wl
->conf
.conn
.wake_up_event
,
1606 wl
->conf
.conn
.listen_interval
);
1609 wl1271_error("resume: wake up conditions failed: %d",
1613 ret
= wl1271_acx_beacon_filter_opt(wl
, wlvif
, false);
1616 wl1271_ps_elp_sleep(wl
);
1619 static int wl1271_op_suspend(struct ieee80211_hw
*hw
,
1620 struct cfg80211_wowlan
*wow
)
1622 struct wl1271
*wl
= hw
->priv
;
1623 struct wl12xx_vif
*wlvif
;
1626 wl1271_debug(DEBUG_MAC80211
, "mac80211 suspend wow=%d", !!wow
);
1629 wl1271_tx_flush(wl
);
1631 mutex_lock(&wl
->mutex
);
1632 wl
->wow_enabled
= true;
1633 wl12xx_for_each_wlvif(wl
, wlvif
) {
1634 ret
= wl1271_configure_suspend(wl
, wlvif
, wow
);
1636 mutex_unlock(&wl
->mutex
);
1637 wl1271_warning("couldn't prepare device to suspend");
1641 mutex_unlock(&wl
->mutex
);
1642 /* flush any remaining work */
1643 wl1271_debug(DEBUG_MAC80211
, "flushing remaining works");
1646 * disable and re-enable interrupts in order to flush
1649 wlcore_disable_interrupts(wl
);
1652 * set suspended flag to avoid triggering a new threaded_irq
1653 * work. no need for spinlock as interrupts are disabled.
1655 set_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
);
1657 wlcore_enable_interrupts(wl
);
1658 flush_work(&wl
->tx_work
);
1659 flush_delayed_work(&wl
->elp_work
);
1664 static int wl1271_op_resume(struct ieee80211_hw
*hw
)
1666 struct wl1271
*wl
= hw
->priv
;
1667 struct wl12xx_vif
*wlvif
;
1668 unsigned long flags
;
1669 bool run_irq_work
= false;
1671 wl1271_debug(DEBUG_MAC80211
, "mac80211 resume wow=%d",
1673 WARN_ON(!wl
->wow_enabled
);
1676 * re-enable irq_work enqueuing, and call irq_work directly if
1677 * there is a pending work.
1679 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1680 clear_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
);
1681 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK
, &wl
->flags
))
1682 run_irq_work
= true;
1683 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1686 wl1271_debug(DEBUG_MAC80211
,
1687 "run postponed irq_work directly");
1689 wlcore_enable_interrupts(wl
);
1692 mutex_lock(&wl
->mutex
);
1693 wl12xx_for_each_wlvif(wl
, wlvif
) {
1694 wl1271_configure_resume(wl
, wlvif
);
1696 wl
->wow_enabled
= false;
1697 mutex_unlock(&wl
->mutex
);
1703 static int wl1271_op_start(struct ieee80211_hw
*hw
)
1705 wl1271_debug(DEBUG_MAC80211
, "mac80211 start");
1708 * We have to delay the booting of the hardware because
1709 * we need to know the local MAC address before downloading and
1710 * initializing the firmware. The MAC address cannot be changed
1711 * after boot, and without the proper MAC address, the firmware
1712 * will not function properly.
1714 * The MAC address is first known when the corresponding interface
1715 * is added. That is where we will initialize the hardware.
1721 static void wl1271_op_stop(struct ieee80211_hw
*hw
)
1723 struct wl1271
*wl
= hw
->priv
;
1726 wl1271_debug(DEBUG_MAC80211
, "mac80211 stop");
1729 * Interrupts must be disabled before setting the state to OFF.
1730 * Otherwise, the interrupt handler might be called and exit without
1731 * reading the interrupt status.
1733 wlcore_disable_interrupts(wl
);
1734 mutex_lock(&wl
->mutex
);
1735 if (wl
->state
== WL1271_STATE_OFF
) {
1736 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
,
1738 wlcore_enable_interrupts(wl
);
1740 mutex_unlock(&wl
->mutex
);
1743 * This will not necessarily enable interrupts as interrupts
1744 * may have been disabled when op_stop was called. It will,
1745 * however, balance the above call to disable_interrupts().
1747 wlcore_enable_interrupts(wl
);
1752 * this must be before the cancel_work calls below, so that the work
1753 * functions don't perform further work.
1755 wl
->state
= WL1271_STATE_OFF
;
1756 mutex_unlock(&wl
->mutex
);
1758 wl1271_flush_deferred_work(wl
);
1759 cancel_delayed_work_sync(&wl
->scan_complete_work
);
1760 cancel_work_sync(&wl
->netstack_work
);
1761 cancel_work_sync(&wl
->tx_work
);
1762 cancel_delayed_work_sync(&wl
->elp_work
);
1763 cancel_delayed_work_sync(&wl
->tx_watchdog_work
);
1764 cancel_delayed_work_sync(&wl
->connection_loss_work
);
1766 /* let's notify MAC80211 about the remaining pending TX frames */
1767 wl12xx_tx_reset(wl
);
1768 mutex_lock(&wl
->mutex
);
1770 wl1271_power_off(wl
);
1772 * In case a recovery was scheduled, interrupts were disabled to avoid
1773 * an interrupt storm. Now that the power is down, it is safe to
1774 * re-enable interrupts to balance the disable depth
1776 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
))
1777 wlcore_enable_interrupts(wl
);
1779 wl
->band
= IEEE80211_BAND_2GHZ
;
1782 wl
->power_level
= WL1271_DEFAULT_POWER_LEVEL
;
1783 wl
->channel_type
= NL80211_CHAN_NO_HT
;
1784 wl
->tx_blocks_available
= 0;
1785 wl
->tx_allocated_blocks
= 0;
1786 wl
->tx_results_count
= 0;
1787 wl
->tx_packets_count
= 0;
1788 wl
->time_offset
= 0;
1789 wl
->ap_fw_ps_map
= 0;
1791 wl
->sched_scanning
= false;
1792 wl
->sleep_auth
= WL1271_PSM_ILLEGAL
;
1793 memset(wl
->roles_map
, 0, sizeof(wl
->roles_map
));
1794 memset(wl
->links_map
, 0, sizeof(wl
->links_map
));
1795 memset(wl
->roc_map
, 0, sizeof(wl
->roc_map
));
1796 wl
->active_sta_count
= 0;
1798 /* The system link is always allocated */
1799 __set_bit(WL12XX_SYSTEM_HLID
, wl
->links_map
);
1802 * this is performed after the cancel_work calls and the associated
1803 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1804 * get executed before all these vars have been reset.
1808 wl
->tx_blocks_freed
= 0;
1810 for (i
= 0; i
< NUM_TX_QUEUES
; i
++) {
1811 wl
->tx_pkts_freed
[i
] = 0;
1812 wl
->tx_allocated_pkts
[i
] = 0;
1815 wl1271_debugfs_reset(wl
);
1817 kfree(wl
->fw_status_1
);
1818 wl
->fw_status_1
= NULL
;
1819 wl
->fw_status_2
= NULL
;
1820 kfree(wl
->tx_res_if
);
1821 wl
->tx_res_if
= NULL
;
1822 kfree(wl
->target_mem_map
);
1823 wl
->target_mem_map
= NULL
;
1825 mutex_unlock(&wl
->mutex
);
1828 static int wl12xx_allocate_rate_policy(struct wl1271
*wl
, u8
*idx
)
1830 u8 policy
= find_first_zero_bit(wl
->rate_policies_map
,
1831 WL12XX_MAX_RATE_POLICIES
);
1832 if (policy
>= WL12XX_MAX_RATE_POLICIES
)
1835 __set_bit(policy
, wl
->rate_policies_map
);
1840 static void wl12xx_free_rate_policy(struct wl1271
*wl
, u8
*idx
)
1842 if (WARN_ON(*idx
>= WL12XX_MAX_RATE_POLICIES
))
1845 __clear_bit(*idx
, wl
->rate_policies_map
);
1846 *idx
= WL12XX_MAX_RATE_POLICIES
;
1849 static u8
wl12xx_get_role_type(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
1851 switch (wlvif
->bss_type
) {
1852 case BSS_TYPE_AP_BSS
:
1854 return WL1271_ROLE_P2P_GO
;
1856 return WL1271_ROLE_AP
;
1858 case BSS_TYPE_STA_BSS
:
1860 return WL1271_ROLE_P2P_CL
;
1862 return WL1271_ROLE_STA
;
1865 return WL1271_ROLE_IBSS
;
1868 wl1271_error("invalid bss_type: %d", wlvif
->bss_type
);
1870 return WL12XX_INVALID_ROLE_TYPE
;
1873 static int wl12xx_init_vif_data(struct wl1271
*wl
, struct ieee80211_vif
*vif
)
1875 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
1878 /* clear everything but the persistent data */
1879 memset(wlvif
, 0, offsetof(struct wl12xx_vif
, persistent
));
1881 switch (ieee80211_vif_type_p2p(vif
)) {
1882 case NL80211_IFTYPE_P2P_CLIENT
:
1885 case NL80211_IFTYPE_STATION
:
1886 wlvif
->bss_type
= BSS_TYPE_STA_BSS
;
1888 case NL80211_IFTYPE_ADHOC
:
1889 wlvif
->bss_type
= BSS_TYPE_IBSS
;
1891 case NL80211_IFTYPE_P2P_GO
:
1894 case NL80211_IFTYPE_AP
:
1895 wlvif
->bss_type
= BSS_TYPE_AP_BSS
;
1898 wlvif
->bss_type
= MAX_BSS_TYPE
;
1902 wlvif
->role_id
= WL12XX_INVALID_ROLE_ID
;
1903 wlvif
->dev_role_id
= WL12XX_INVALID_ROLE_ID
;
1904 wlvif
->dev_hlid
= WL12XX_INVALID_LINK_ID
;
1906 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
1907 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
1908 /* init sta/ibss data */
1909 wlvif
->sta
.hlid
= WL12XX_INVALID_LINK_ID
;
1910 wl12xx_allocate_rate_policy(wl
, &wlvif
->sta
.basic_rate_idx
);
1911 wl12xx_allocate_rate_policy(wl
, &wlvif
->sta
.ap_rate_idx
);
1912 wl12xx_allocate_rate_policy(wl
, &wlvif
->sta
.p2p_rate_idx
);
1913 wlvif
->basic_rate_set
= CONF_TX_RATE_MASK_BASIC
;
1914 wlvif
->basic_rate
= CONF_TX_RATE_MASK_BASIC
;
1915 wlvif
->rate_set
= CONF_TX_RATE_MASK_BASIC
;
1918 wlvif
->ap
.bcast_hlid
= WL12XX_INVALID_LINK_ID
;
1919 wlvif
->ap
.global_hlid
= WL12XX_INVALID_LINK_ID
;
1920 wl12xx_allocate_rate_policy(wl
, &wlvif
->ap
.mgmt_rate_idx
);
1921 wl12xx_allocate_rate_policy(wl
, &wlvif
->ap
.bcast_rate_idx
);
1922 for (i
= 0; i
< CONF_TX_MAX_AC_COUNT
; i
++)
1923 wl12xx_allocate_rate_policy(wl
,
1924 &wlvif
->ap
.ucast_rate_idx
[i
]);
1925 wlvif
->basic_rate_set
= CONF_TX_AP_ENABLED_RATES
;
1927 * TODO: check if basic_rate shouldn't be
1928 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
1929 * instead (the same thing for STA above).
1931 wlvif
->basic_rate
= CONF_TX_AP_ENABLED_RATES
;
1932 /* TODO: this seems to be used only for STA, check it */
1933 wlvif
->rate_set
= CONF_TX_AP_ENABLED_RATES
;
1936 wlvif
->bitrate_masks
[IEEE80211_BAND_2GHZ
] = wl
->conf
.tx
.basic_rate
;
1937 wlvif
->bitrate_masks
[IEEE80211_BAND_5GHZ
] = wl
->conf
.tx
.basic_rate_5
;
1938 wlvif
->beacon_int
= WL1271_DEFAULT_BEACON_INT
;
1941 * mac80211 configures some values globally, while we treat them
1942 * per-interface. thus, on init, we have to copy them from wl
1944 wlvif
->band
= wl
->band
;
1945 wlvif
->channel
= wl
->channel
;
1946 wlvif
->power_level
= wl
->power_level
;
1947 wlvif
->channel_type
= wl
->channel_type
;
1949 INIT_WORK(&wlvif
->rx_streaming_enable_work
,
1950 wl1271_rx_streaming_enable_work
);
1951 INIT_WORK(&wlvif
->rx_streaming_disable_work
,
1952 wl1271_rx_streaming_disable_work
);
1953 INIT_LIST_HEAD(&wlvif
->list
);
1955 setup_timer(&wlvif
->rx_streaming_timer
, wl1271_rx_streaming_timer
,
1956 (unsigned long) wlvif
);
1960 static bool wl12xx_init_fw(struct wl1271
*wl
)
1962 int retries
= WL1271_BOOT_RETRIES
;
1963 bool booted
= false;
1964 struct wiphy
*wiphy
= wl
->hw
->wiphy
;
1969 ret
= wl12xx_chip_wakeup(wl
, false);
1973 ret
= wl
->ops
->boot(wl
);
1977 ret
= wl1271_hw_init(wl
);
1985 mutex_unlock(&wl
->mutex
);
1986 /* Unlocking the mutex in the middle of handling is
1987 inherently unsafe. In this case we deem it safe to do,
1988 because we need to let any possibly pending IRQ out of
1989 the system (and while we are WL1271_STATE_OFF the IRQ
1990 work function will not do anything.) Also, any other
1991 possible concurrent operations will fail due to the
1992 current state, hence the wl1271 struct should be safe. */
1993 wlcore_disable_interrupts(wl
);
1994 wl1271_flush_deferred_work(wl
);
1995 cancel_work_sync(&wl
->netstack_work
);
1996 mutex_lock(&wl
->mutex
);
1998 wl1271_power_off(wl
);
2002 wl1271_error("firmware boot failed despite %d retries",
2003 WL1271_BOOT_RETRIES
);
2007 wl1271_info("firmware booted (%s)", wl
->chip
.fw_ver_str
);
2009 /* update hw/fw version info in wiphy struct */
2010 wiphy
->hw_version
= wl
->chip
.id
;
2011 strncpy(wiphy
->fw_version
, wl
->chip
.fw_ver_str
,
2012 sizeof(wiphy
->fw_version
));
2015 * Now we know if 11a is supported (info from the NVS), so disable
2016 * 11a channels if not supported
2018 if (!wl
->enable_11a
)
2019 wiphy
->bands
[IEEE80211_BAND_5GHZ
]->n_channels
= 0;
2021 wl1271_debug(DEBUG_MAC80211
, "11a is %ssupported",
2022 wl
->enable_11a
? "" : "not ");
2024 wl
->state
= WL1271_STATE_ON
;
2029 static bool wl12xx_dev_role_started(struct wl12xx_vif
*wlvif
)
2031 return wlvif
->dev_hlid
!= WL12XX_INVALID_LINK_ID
;
2035 * Check whether a fw switch (i.e. moving from one loaded
2036 * fw to another) is needed. This function is also responsible
2037 * for updating wl->last_vif_count, so it must be called before
2038 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2041 static bool wl12xx_need_fw_change(struct wl1271
*wl
,
2042 struct vif_counter_data vif_counter_data
,
2045 enum wl12xx_fw_type current_fw
= wl
->fw_type
;
2046 u8 vif_count
= vif_counter_data
.counter
;
2048 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS
, &wl
->flags
))
2051 /* increase the vif count if this is a new vif */
2052 if (add
&& !vif_counter_data
.cur_vif_running
)
2055 wl
->last_vif_count
= vif_count
;
2057 /* no need for fw change if the device is OFF */
2058 if (wl
->state
== WL1271_STATE_OFF
)
2061 if (vif_count
> 1 && current_fw
== WL12XX_FW_TYPE_NORMAL
)
2063 if (vif_count
<= 1 && current_fw
== WL12XX_FW_TYPE_MULTI
)
2070 * Enter "forced psm". Make sure the sta is in psm against the ap,
2071 * to make the fw switch a bit more disconnection-persistent.
2073 static void wl12xx_force_active_psm(struct wl1271
*wl
)
2075 struct wl12xx_vif
*wlvif
;
2077 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
2078 wl1271_ps_set_mode(wl
, wlvif
, STATION_POWER_SAVE_MODE
);
2082 static int wl1271_op_add_interface(struct ieee80211_hw
*hw
,
2083 struct ieee80211_vif
*vif
)
2085 struct wl1271
*wl
= hw
->priv
;
2086 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2087 struct vif_counter_data vif_count
;
2090 bool booted
= false;
2092 vif
->driver_flags
|= IEEE80211_VIF_BEACON_FILTER
|
2093 IEEE80211_VIF_SUPPORTS_CQM_RSSI
;
2095 wl1271_debug(DEBUG_MAC80211
, "mac80211 add interface type %d mac %pM",
2096 ieee80211_vif_type_p2p(vif
), vif
->addr
);
2098 wl12xx_get_vif_count(hw
, vif
, &vif_count
);
2100 mutex_lock(&wl
->mutex
);
2101 ret
= wl1271_ps_elp_wakeup(wl
);
2106 * in some very corner case HW recovery scenarios its possible to
2107 * get here before __wl1271_op_remove_interface is complete, so
2108 * opt out if that is the case.
2110 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
) ||
2111 test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
)) {
2117 ret
= wl12xx_init_vif_data(wl
, vif
);
2122 role_type
= wl12xx_get_role_type(wl
, wlvif
);
2123 if (role_type
== WL12XX_INVALID_ROLE_TYPE
) {
2128 if (wl12xx_need_fw_change(wl
, vif_count
, true)) {
2129 wl12xx_force_active_psm(wl
);
2130 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
);
2131 mutex_unlock(&wl
->mutex
);
2132 wl1271_recovery_work(&wl
->recovery_work
);
2137 * TODO: after the nvs issue will be solved, move this block
2138 * to start(), and make sure here the driver is ON.
2140 if (wl
->state
== WL1271_STATE_OFF
) {
2142 * we still need this in order to configure the fw
2143 * while uploading the nvs
2145 memcpy(wl
->addresses
[0].addr
, vif
->addr
, ETH_ALEN
);
2147 booted
= wl12xx_init_fw(wl
);
2154 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2155 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2157 * The device role is a special role used for
2158 * rx and tx frames prior to association (as
2159 * the STA role can get packets only from
2160 * its associated bssid)
2162 ret
= wl12xx_cmd_role_enable(wl
, vif
->addr
,
2164 &wlvif
->dev_role_id
);
2169 ret
= wl12xx_cmd_role_enable(wl
, vif
->addr
,
2170 role_type
, &wlvif
->role_id
);
2174 ret
= wl1271_init_vif_specific(wl
, vif
);
2178 list_add(&wlvif
->list
, &wl
->wlvif_list
);
2179 set_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
);
2181 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
2186 wl1271_ps_elp_sleep(wl
);
2188 mutex_unlock(&wl
->mutex
);
2193 static void __wl1271_op_remove_interface(struct wl1271
*wl
,
2194 struct ieee80211_vif
*vif
,
2195 bool reset_tx_queues
)
2197 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2199 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
2201 wl1271_debug(DEBUG_MAC80211
, "mac80211 remove interface");
2203 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
2206 /* because of hardware recovery, we may get here twice */
2207 if (wl
->state
!= WL1271_STATE_ON
)
2210 wl1271_info("down");
2212 if (wl
->scan
.state
!= WL1271_SCAN_STATE_IDLE
&&
2213 wl
->scan_vif
== vif
) {
2215 * Rearm the tx watchdog just before idling scan. This
2216 * prevents just-finished scans from triggering the watchdog
2218 wl12xx_rearm_tx_watchdog_locked(wl
);
2220 wl
->scan
.state
= WL1271_SCAN_STATE_IDLE
;
2221 memset(wl
->scan
.scanned_ch
, 0, sizeof(wl
->scan
.scanned_ch
));
2222 wl
->scan_vif
= NULL
;
2223 wl
->scan
.req
= NULL
;
2224 ieee80211_scan_completed(wl
->hw
, true);
2227 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
)) {
2228 /* disable active roles */
2229 ret
= wl1271_ps_elp_wakeup(wl
);
2233 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2234 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2235 if (wl12xx_dev_role_started(wlvif
))
2236 wl12xx_stop_dev(wl
, wlvif
);
2238 ret
= wl12xx_cmd_role_disable(wl
, &wlvif
->dev_role_id
);
2243 ret
= wl12xx_cmd_role_disable(wl
, &wlvif
->role_id
);
2247 wl1271_ps_elp_sleep(wl
);
2250 /* clear all hlids (except system_hlid) */
2251 wlvif
->dev_hlid
= WL12XX_INVALID_LINK_ID
;
2253 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2254 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2255 wlvif
->sta
.hlid
= WL12XX_INVALID_LINK_ID
;
2256 wl12xx_free_rate_policy(wl
, &wlvif
->sta
.basic_rate_idx
);
2257 wl12xx_free_rate_policy(wl
, &wlvif
->sta
.ap_rate_idx
);
2258 wl12xx_free_rate_policy(wl
, &wlvif
->sta
.p2p_rate_idx
);
2260 wlvif
->ap
.bcast_hlid
= WL12XX_INVALID_LINK_ID
;
2261 wlvif
->ap
.global_hlid
= WL12XX_INVALID_LINK_ID
;
2262 wl12xx_free_rate_policy(wl
, &wlvif
->ap
.mgmt_rate_idx
);
2263 wl12xx_free_rate_policy(wl
, &wlvif
->ap
.bcast_rate_idx
);
2264 for (i
= 0; i
< CONF_TX_MAX_AC_COUNT
; i
++)
2265 wl12xx_free_rate_policy(wl
,
2266 &wlvif
->ap
.ucast_rate_idx
[i
]);
2267 wl1271_free_ap_keys(wl
, wlvif
);
2270 dev_kfree_skb(wlvif
->probereq
);
2271 wlvif
->probereq
= NULL
;
2272 wl12xx_tx_reset_wlvif(wl
, wlvif
);
2273 if (wl
->last_wlvif
== wlvif
)
2274 wl
->last_wlvif
= NULL
;
2275 list_del(&wlvif
->list
);
2276 memset(wlvif
->ap
.sta_hlid_map
, 0, sizeof(wlvif
->ap
.sta_hlid_map
));
2277 wlvif
->role_id
= WL12XX_INVALID_ROLE_ID
;
2278 wlvif
->dev_role_id
= WL12XX_INVALID_ROLE_ID
;
2285 /* Last AP, have more stations. Configure according to STA. */
2286 if (wl
->ap_count
== 0 && is_ap
&& wl
->sta_count
) {
2287 u8 sta_auth
= wl
->conf
.conn
.sta_sleep_auth
;
2288 /* Configure for power according to debugfs */
2289 if (sta_auth
!= WL1271_PSM_ILLEGAL
)
2290 wl1271_acx_sleep_auth(wl
, sta_auth
);
2291 /* Configure for power always on */
2292 else if (wl
->quirks
& WLCORE_QUIRK_NO_ELP
)
2293 wl1271_acx_sleep_auth(wl
, WL1271_PSM_CAM
);
2294 /* Configure for ELP power saving */
2296 wl1271_acx_sleep_auth(wl
, WL1271_PSM_ELP
);
2299 mutex_unlock(&wl
->mutex
);
2301 del_timer_sync(&wlvif
->rx_streaming_timer
);
2302 cancel_work_sync(&wlvif
->rx_streaming_enable_work
);
2303 cancel_work_sync(&wlvif
->rx_streaming_disable_work
);
2305 mutex_lock(&wl
->mutex
);
2308 static void wl1271_op_remove_interface(struct ieee80211_hw
*hw
,
2309 struct ieee80211_vif
*vif
)
2311 struct wl1271
*wl
= hw
->priv
;
2312 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2313 struct wl12xx_vif
*iter
;
2314 struct vif_counter_data vif_count
;
2315 bool cancel_recovery
= true;
2317 wl12xx_get_vif_count(hw
, vif
, &vif_count
);
2318 mutex_lock(&wl
->mutex
);
2320 if (wl
->state
== WL1271_STATE_OFF
||
2321 !test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
2325 * wl->vif can be null here if someone shuts down the interface
2326 * just when hardware recovery has been started.
2328 wl12xx_for_each_wlvif(wl
, iter
) {
2332 __wl1271_op_remove_interface(wl
, vif
, true);
2335 WARN_ON(iter
!= wlvif
);
2336 if (wl12xx_need_fw_change(wl
, vif_count
, false)) {
2337 wl12xx_force_active_psm(wl
);
2338 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
);
2339 wl12xx_queue_recovery_work(wl
);
2340 cancel_recovery
= false;
2343 mutex_unlock(&wl
->mutex
);
2344 if (cancel_recovery
)
2345 cancel_work_sync(&wl
->recovery_work
);
2348 static int wl12xx_op_change_interface(struct ieee80211_hw
*hw
,
2349 struct ieee80211_vif
*vif
,
2350 enum nl80211_iftype new_type
, bool p2p
)
2352 struct wl1271
*wl
= hw
->priv
;
2355 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS
, &wl
->flags
);
2356 wl1271_op_remove_interface(hw
, vif
);
2358 vif
->type
= new_type
;
2360 ret
= wl1271_op_add_interface(hw
, vif
);
2362 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS
, &wl
->flags
);
2366 static int wl1271_join(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2370 bool is_ibss
= (wlvif
->bss_type
== BSS_TYPE_IBSS
);
2373 * One of the side effects of the JOIN command is that is clears
2374 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2375 * to a WPA/WPA2 access point will therefore kill the data-path.
2376 * Currently the only valid scenario for JOIN during association
2377 * is on roaming, in which case we will also be given new keys.
2378 * Keep the below message for now, unless it starts bothering
2379 * users who really like to roam a lot :)
2381 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
2382 wl1271_info("JOIN while associated.");
2384 /* clear encryption type */
2385 wlvif
->encryption_type
= KEY_NONE
;
2388 set_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
);
2391 ret
= wl12xx_cmd_role_start_ibss(wl
, wlvif
);
2393 ret
= wl12xx_cmd_role_start_sta(wl
, wlvif
);
2397 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
2401 * The join command disable the keep-alive mode, shut down its process,
2402 * and also clear the template config, so we need to reset it all after
2403 * the join. The acx_aid starts the keep-alive process, and the order
2404 * of the commands below is relevant.
2406 ret
= wl1271_acx_keep_alive_mode(wl
, wlvif
, true);
2410 ret
= wl1271_acx_aid(wl
, wlvif
, wlvif
->aid
);
2414 ret
= wl12xx_cmd_build_klv_null_data(wl
, wlvif
);
2418 ret
= wl1271_acx_keep_alive_config(wl
, wlvif
,
2419 CMD_TEMPL_KLV_IDX_NULL_DATA
,
2420 ACX_KEEP_ALIVE_TPL_VALID
);
2428 static int wl1271_unjoin(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2432 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS
, &wlvif
->flags
)) {
2433 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
2435 wl12xx_cmd_stop_channel_switch(wl
);
2436 ieee80211_chswitch_done(vif
, false);
2439 /* to stop listening to a channel, we disconnect */
2440 ret
= wl12xx_cmd_role_stop_sta(wl
, wlvif
);
2444 /* reset TX security counters on a clean disconnect */
2445 wlvif
->tx_security_last_seq_lsb
= 0;
2446 wlvif
->tx_security_seq
= 0;
2452 static void wl1271_set_band_rate(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2454 wlvif
->basic_rate_set
= wlvif
->bitrate_masks
[wlvif
->band
];
2455 wlvif
->rate_set
= wlvif
->basic_rate_set
;
2458 static int wl1271_sta_handle_idle(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2462 bool cur_idle
= !test_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
);
2464 if (idle
== cur_idle
)
2468 /* no need to croc if we weren't busy (e.g. during boot) */
2469 if (wl12xx_dev_role_started(wlvif
)) {
2470 ret
= wl12xx_stop_dev(wl
, wlvif
);
2475 wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
2476 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
2479 ret
= wl1271_acx_keep_alive_config(
2480 wl
, wlvif
, CMD_TEMPL_KLV_IDX_NULL_DATA
,
2481 ACX_KEEP_ALIVE_TPL_INVALID
);
2484 clear_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
);
2486 /* The current firmware only supports sched_scan in idle */
2487 if (wl
->sched_scanning
) {
2488 wl1271_scan_sched_scan_stop(wl
, wlvif
);
2489 ieee80211_sched_scan_stopped(wl
->hw
);
2492 ret
= wl12xx_start_dev(wl
, wlvif
);
2495 set_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
);
2502 static int wl12xx_config_vif(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2503 struct ieee80211_conf
*conf
, u32 changed
)
2505 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
2508 channel
= ieee80211_frequency_to_channel(conf
->channel
->center_freq
);
2510 /* if the channel changes while joined, join again */
2511 if (changed
& IEEE80211_CONF_CHANGE_CHANNEL
&&
2512 ((wlvif
->band
!= conf
->channel
->band
) ||
2513 (wlvif
->channel
!= channel
) ||
2514 (wlvif
->channel_type
!= conf
->channel_type
))) {
2515 /* send all pending packets */
2516 wl1271_tx_work_locked(wl
);
2517 wlvif
->band
= conf
->channel
->band
;
2518 wlvif
->channel
= channel
;
2519 wlvif
->channel_type
= conf
->channel_type
;
2522 wl1271_set_band_rate(wl
, wlvif
);
2523 ret
= wl1271_init_ap_rates(wl
, wlvif
);
2525 wl1271_error("AP rate policy change failed %d",
2529 * FIXME: the mac80211 should really provide a fixed
2530 * rate to use here. for now, just use the smallest
2531 * possible rate for the band as a fixed rate for
2532 * association frames and other control messages.
2534 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
2535 wl1271_set_band_rate(wl
, wlvif
);
2538 wl1271_tx_min_rate_get(wl
,
2539 wlvif
->basic_rate_set
);
2540 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
2542 wl1271_warning("rate policy for channel "
2546 * change the ROC channel. do it only if we are
2547 * not idle. otherwise, CROC will be called
2550 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
,
2552 wl12xx_dev_role_started(wlvif
) &&
2553 !(conf
->flags
& IEEE80211_CONF_IDLE
)) {
2554 ret
= wl12xx_stop_dev(wl
, wlvif
);
2558 ret
= wl12xx_start_dev(wl
, wlvif
);
2565 if ((changed
& IEEE80211_CONF_CHANGE_PS
) && !is_ap
) {
2567 if ((conf
->flags
& IEEE80211_CONF_PS
) &&
2568 test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) &&
2569 !test_bit(WLVIF_FLAG_IN_PS
, &wlvif
->flags
)) {
2574 if (wl
->conf
.conn
.forced_ps
) {
2575 ps_mode
= STATION_POWER_SAVE_MODE
;
2576 ps_mode_str
= "forced";
2578 ps_mode
= STATION_AUTO_PS_MODE
;
2579 ps_mode_str
= "auto";
2582 wl1271_debug(DEBUG_PSM
, "%s ps enabled", ps_mode_str
);
2584 ret
= wl1271_ps_set_mode(wl
, wlvif
, ps_mode
);
2587 wl1271_warning("enter %s ps failed %d",
2590 } else if (!(conf
->flags
& IEEE80211_CONF_PS
) &&
2591 test_bit(WLVIF_FLAG_IN_PS
, &wlvif
->flags
)) {
2593 wl1271_debug(DEBUG_PSM
, "auto ps disabled");
2595 ret
= wl1271_ps_set_mode(wl
, wlvif
,
2596 STATION_ACTIVE_MODE
);
2598 wl1271_warning("exit auto ps failed %d", ret
);
2602 if (conf
->power_level
!= wlvif
->power_level
) {
2603 ret
= wl1271_acx_tx_power(wl
, wlvif
, conf
->power_level
);
2607 wlvif
->power_level
= conf
->power_level
;
2613 static int wl1271_op_config(struct ieee80211_hw
*hw
, u32 changed
)
2615 struct wl1271
*wl
= hw
->priv
;
2616 struct wl12xx_vif
*wlvif
;
2617 struct ieee80211_conf
*conf
= &hw
->conf
;
2618 int channel
, ret
= 0;
2620 channel
= ieee80211_frequency_to_channel(conf
->channel
->center_freq
);
2622 wl1271_debug(DEBUG_MAC80211
, "mac80211 config ch %d psm %s power %d %s"
2625 conf
->flags
& IEEE80211_CONF_PS
? "on" : "off",
2627 conf
->flags
& IEEE80211_CONF_IDLE
? "idle" : "in use",
2631 * mac80211 will go to idle nearly immediately after transmitting some
2632 * frames, such as the deauth. To make sure those frames reach the air,
2633 * wait here until the TX queue is fully flushed.
2635 if ((changed
& IEEE80211_CONF_CHANGE_CHANNEL
) ||
2636 ((changed
& IEEE80211_CONF_CHANGE_IDLE
) &&
2637 (conf
->flags
& IEEE80211_CONF_IDLE
)))
2638 wl1271_tx_flush(wl
);
2640 mutex_lock(&wl
->mutex
);
2642 /* we support configuring the channel and band even while off */
2643 if (changed
& IEEE80211_CONF_CHANGE_CHANNEL
) {
2644 wl
->band
= conf
->channel
->band
;
2645 wl
->channel
= channel
;
2646 wl
->channel_type
= conf
->channel_type
;
2649 if (changed
& IEEE80211_CONF_CHANGE_POWER
)
2650 wl
->power_level
= conf
->power_level
;
2652 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
2655 ret
= wl1271_ps_elp_wakeup(wl
);
2659 /* configure each interface */
2660 wl12xx_for_each_wlvif(wl
, wlvif
) {
2661 ret
= wl12xx_config_vif(wl
, wlvif
, conf
, changed
);
2667 wl1271_ps_elp_sleep(wl
);
2670 mutex_unlock(&wl
->mutex
);
2675 struct wl1271_filter_params
{
2678 u8 mc_list
[ACX_MC_ADDRESS_GROUP_MAX
][ETH_ALEN
];
2681 static u64
wl1271_op_prepare_multicast(struct ieee80211_hw
*hw
,
2682 struct netdev_hw_addr_list
*mc_list
)
2684 struct wl1271_filter_params
*fp
;
2685 struct netdev_hw_addr
*ha
;
2686 struct wl1271
*wl
= hw
->priv
;
2688 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
2691 fp
= kzalloc(sizeof(*fp
), GFP_ATOMIC
);
2693 wl1271_error("Out of memory setting filters.");
2697 /* update multicast filtering parameters */
2698 fp
->mc_list_length
= 0;
2699 if (netdev_hw_addr_list_count(mc_list
) > ACX_MC_ADDRESS_GROUP_MAX
) {
2700 fp
->enabled
= false;
2703 netdev_hw_addr_list_for_each(ha
, mc_list
) {
2704 memcpy(fp
->mc_list
[fp
->mc_list_length
],
2705 ha
->addr
, ETH_ALEN
);
2706 fp
->mc_list_length
++;
2710 return (u64
)(unsigned long)fp
;
2713 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
2716 FIF_BCN_PRBRESP_PROMISC | \
2720 static void wl1271_op_configure_filter(struct ieee80211_hw
*hw
,
2721 unsigned int changed
,
2722 unsigned int *total
, u64 multicast
)
2724 struct wl1271_filter_params
*fp
= (void *)(unsigned long)multicast
;
2725 struct wl1271
*wl
= hw
->priv
;
2726 struct wl12xx_vif
*wlvif
;
2730 wl1271_debug(DEBUG_MAC80211
, "mac80211 configure filter changed %x"
2731 " total %x", changed
, *total
);
2733 mutex_lock(&wl
->mutex
);
2735 *total
&= WL1271_SUPPORTED_FILTERS
;
2736 changed
&= WL1271_SUPPORTED_FILTERS
;
2738 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
2741 ret
= wl1271_ps_elp_wakeup(wl
);
2745 wl12xx_for_each_wlvif(wl
, wlvif
) {
2746 if (wlvif
->bss_type
!= BSS_TYPE_AP_BSS
) {
2747 if (*total
& FIF_ALLMULTI
)
2748 ret
= wl1271_acx_group_address_tbl(wl
, wlvif
,
2752 ret
= wl1271_acx_group_address_tbl(wl
, wlvif
,
2755 fp
->mc_list_length
);
2762 * the fw doesn't provide an api to configure the filters. instead,
2763 * the filters configuration is based on the active roles / ROC
2768 wl1271_ps_elp_sleep(wl
);
2771 mutex_unlock(&wl
->mutex
);
2775 static int wl1271_record_ap_key(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2776 u8 id
, u8 key_type
, u8 key_size
,
2777 const u8
*key
, u8 hlid
, u32 tx_seq_32
,
2780 struct wl1271_ap_key
*ap_key
;
2783 wl1271_debug(DEBUG_CRYPT
, "record ap key id %d", (int)id
);
2785 if (key_size
> MAX_KEY_SIZE
)
2789 * Find next free entry in ap_keys. Also check we are not replacing
2792 for (i
= 0; i
< MAX_NUM_KEYS
; i
++) {
2793 if (wlvif
->ap
.recorded_keys
[i
] == NULL
)
2796 if (wlvif
->ap
.recorded_keys
[i
]->id
== id
) {
2797 wl1271_warning("trying to record key replacement");
2802 if (i
== MAX_NUM_KEYS
)
2805 ap_key
= kzalloc(sizeof(*ap_key
), GFP_KERNEL
);
2810 ap_key
->key_type
= key_type
;
2811 ap_key
->key_size
= key_size
;
2812 memcpy(ap_key
->key
, key
, key_size
);
2813 ap_key
->hlid
= hlid
;
2814 ap_key
->tx_seq_32
= tx_seq_32
;
2815 ap_key
->tx_seq_16
= tx_seq_16
;
2817 wlvif
->ap
.recorded_keys
[i
] = ap_key
;
2821 static void wl1271_free_ap_keys(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2825 for (i
= 0; i
< MAX_NUM_KEYS
; i
++) {
2826 kfree(wlvif
->ap
.recorded_keys
[i
]);
2827 wlvif
->ap
.recorded_keys
[i
] = NULL
;
2831 static int wl1271_ap_init_hwenc(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2834 struct wl1271_ap_key
*key
;
2835 bool wep_key_added
= false;
2837 for (i
= 0; i
< MAX_NUM_KEYS
; i
++) {
2839 if (wlvif
->ap
.recorded_keys
[i
] == NULL
)
2842 key
= wlvif
->ap
.recorded_keys
[i
];
2844 if (hlid
== WL12XX_INVALID_LINK_ID
)
2845 hlid
= wlvif
->ap
.bcast_hlid
;
2847 ret
= wl1271_cmd_set_ap_key(wl
, wlvif
, KEY_ADD_OR_REPLACE
,
2848 key
->id
, key
->key_type
,
2849 key
->key_size
, key
->key
,
2850 hlid
, key
->tx_seq_32
,
2855 if (key
->key_type
== KEY_WEP
)
2856 wep_key_added
= true;
2859 if (wep_key_added
) {
2860 ret
= wl12xx_cmd_set_default_wep_key(wl
, wlvif
->default_key
,
2861 wlvif
->ap
.bcast_hlid
);
2867 wl1271_free_ap_keys(wl
, wlvif
);
2871 static int wl1271_set_key(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2872 u16 action
, u8 id
, u8 key_type
,
2873 u8 key_size
, const u8
*key
, u32 tx_seq_32
,
2874 u16 tx_seq_16
, struct ieee80211_sta
*sta
)
2877 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
2880 struct wl1271_station
*wl_sta
;
2884 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
2885 hlid
= wl_sta
->hlid
;
2887 hlid
= wlvif
->ap
.bcast_hlid
;
2890 if (!test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
)) {
2892 * We do not support removing keys after AP shutdown.
2893 * Pretend we do to make mac80211 happy.
2895 if (action
!= KEY_ADD_OR_REPLACE
)
2898 ret
= wl1271_record_ap_key(wl
, wlvif
, id
,
2900 key
, hlid
, tx_seq_32
,
2903 ret
= wl1271_cmd_set_ap_key(wl
, wlvif
, action
,
2904 id
, key_type
, key_size
,
2905 key
, hlid
, tx_seq_32
,
2913 static const u8 bcast_addr
[ETH_ALEN
] = {
2914 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2917 addr
= sta
? sta
->addr
: bcast_addr
;
2919 if (is_zero_ether_addr(addr
)) {
2920 /* We dont support TX only encryption */
2924 /* The wl1271 does not allow to remove unicast keys - they
2925 will be cleared automatically on next CMD_JOIN. Ignore the
2926 request silently, as we dont want the mac80211 to emit
2927 an error message. */
2928 if (action
== KEY_REMOVE
&& !is_broadcast_ether_addr(addr
))
2931 /* don't remove key if hlid was already deleted */
2932 if (action
== KEY_REMOVE
&&
2933 wlvif
->sta
.hlid
== WL12XX_INVALID_LINK_ID
)
2936 ret
= wl1271_cmd_set_sta_key(wl
, wlvif
, action
,
2937 id
, key_type
, key_size
,
2938 key
, addr
, tx_seq_32
,
2943 /* the default WEP key needs to be configured at least once */
2944 if (key_type
== KEY_WEP
) {
2945 ret
= wl12xx_cmd_set_default_wep_key(wl
,
2956 static int wlcore_op_set_key(struct ieee80211_hw
*hw
, enum set_key_cmd cmd
,
2957 struct ieee80211_vif
*vif
,
2958 struct ieee80211_sta
*sta
,
2959 struct ieee80211_key_conf
*key_conf
)
2961 struct wl1271
*wl
= hw
->priv
;
2963 return wlcore_hw_set_key(wl
, cmd
, vif
, sta
, key_conf
);
2966 int wlcore_set_key(struct wl1271
*wl
, enum set_key_cmd cmd
,
2967 struct ieee80211_vif
*vif
,
2968 struct ieee80211_sta
*sta
,
2969 struct ieee80211_key_conf
*key_conf
)
2971 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2977 wl1271_debug(DEBUG_MAC80211
, "mac80211 set key");
2979 wl1271_debug(DEBUG_CRYPT
, "CMD: 0x%x sta: %p", cmd
, sta
);
2980 wl1271_debug(DEBUG_CRYPT
, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
2981 key_conf
->cipher
, key_conf
->keyidx
,
2982 key_conf
->keylen
, key_conf
->flags
);
2983 wl1271_dump(DEBUG_CRYPT
, "KEY: ", key_conf
->key
, key_conf
->keylen
);
2985 mutex_lock(&wl
->mutex
);
2987 if (unlikely(wl
->state
== WL1271_STATE_OFF
)) {
2992 ret
= wl1271_ps_elp_wakeup(wl
);
2996 switch (key_conf
->cipher
) {
2997 case WLAN_CIPHER_SUITE_WEP40
:
2998 case WLAN_CIPHER_SUITE_WEP104
:
3001 key_conf
->hw_key_idx
= key_conf
->keyidx
;
3003 case WLAN_CIPHER_SUITE_TKIP
:
3004 key_type
= KEY_TKIP
;
3006 key_conf
->hw_key_idx
= key_conf
->keyidx
;
3007 tx_seq_32
= WL1271_TX_SECURITY_HI32(wlvif
->tx_security_seq
);
3008 tx_seq_16
= WL1271_TX_SECURITY_LO16(wlvif
->tx_security_seq
);
3010 case WLAN_CIPHER_SUITE_CCMP
:
3013 key_conf
->flags
|= IEEE80211_KEY_FLAG_PUT_IV_SPACE
;
3014 tx_seq_32
= WL1271_TX_SECURITY_HI32(wlvif
->tx_security_seq
);
3015 tx_seq_16
= WL1271_TX_SECURITY_LO16(wlvif
->tx_security_seq
);
3017 case WL1271_CIPHER_SUITE_GEM
:
3019 tx_seq_32
= WL1271_TX_SECURITY_HI32(wlvif
->tx_security_seq
);
3020 tx_seq_16
= WL1271_TX_SECURITY_LO16(wlvif
->tx_security_seq
);
3023 wl1271_error("Unknown key algo 0x%x", key_conf
->cipher
);
3031 ret
= wl1271_set_key(wl
, wlvif
, KEY_ADD_OR_REPLACE
,
3032 key_conf
->keyidx
, key_type
,
3033 key_conf
->keylen
, key_conf
->key
,
3034 tx_seq_32
, tx_seq_16
, sta
);
3036 wl1271_error("Could not add or replace key");
3041 * reconfiguring arp response if the unicast (or common)
3042 * encryption key type was changed
3044 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
&&
3045 (sta
|| key_type
== KEY_WEP
) &&
3046 wlvif
->encryption_type
!= key_type
) {
3047 wlvif
->encryption_type
= key_type
;
3048 ret
= wl1271_cmd_build_arp_rsp(wl
, wlvif
);
3050 wl1271_warning("build arp rsp failed: %d", ret
);
3057 ret
= wl1271_set_key(wl
, wlvif
, KEY_REMOVE
,
3058 key_conf
->keyidx
, key_type
,
3059 key_conf
->keylen
, key_conf
->key
,
3062 wl1271_error("Could not remove key");
3068 wl1271_error("Unsupported key cmd 0x%x", cmd
);
3074 wl1271_ps_elp_sleep(wl
);
3077 mutex_unlock(&wl
->mutex
);
3081 EXPORT_SYMBOL_GPL(wlcore_set_key
);
3083 static int wl1271_op_hw_scan(struct ieee80211_hw
*hw
,
3084 struct ieee80211_vif
*vif
,
3085 struct cfg80211_scan_request
*req
)
3087 struct wl1271
*wl
= hw
->priv
;
3092 wl1271_debug(DEBUG_MAC80211
, "mac80211 hw scan");
3095 ssid
= req
->ssids
[0].ssid
;
3096 len
= req
->ssids
[0].ssid_len
;
3099 mutex_lock(&wl
->mutex
);
3101 if (wl
->state
== WL1271_STATE_OFF
) {
3103 * We cannot return -EBUSY here because cfg80211 will expect
3104 * a call to ieee80211_scan_completed if we do - in this case
3105 * there won't be any call.
3111 ret
= wl1271_ps_elp_wakeup(wl
);
3115 /* fail if there is any role in ROC */
3116 if (find_first_bit(wl
->roc_map
, WL12XX_MAX_ROLES
) < WL12XX_MAX_ROLES
) {
3117 /* don't allow scanning right now */
3122 ret
= wl1271_scan(hw
->priv
, vif
, ssid
, len
, req
);
3124 wl1271_ps_elp_sleep(wl
);
3126 mutex_unlock(&wl
->mutex
);
3131 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw
*hw
,
3132 struct ieee80211_vif
*vif
)
3134 struct wl1271
*wl
= hw
->priv
;
3137 wl1271_debug(DEBUG_MAC80211
, "mac80211 cancel hw scan");
3139 mutex_lock(&wl
->mutex
);
3141 if (wl
->state
== WL1271_STATE_OFF
)
3144 if (wl
->scan
.state
== WL1271_SCAN_STATE_IDLE
)
3147 ret
= wl1271_ps_elp_wakeup(wl
);
3151 if (wl
->scan
.state
!= WL1271_SCAN_STATE_DONE
) {
3152 ret
= wl1271_scan_stop(wl
);
3158 * Rearm the tx watchdog just before idling scan. This
3159 * prevents just-finished scans from triggering the watchdog
3161 wl12xx_rearm_tx_watchdog_locked(wl
);
3163 wl
->scan
.state
= WL1271_SCAN_STATE_IDLE
;
3164 memset(wl
->scan
.scanned_ch
, 0, sizeof(wl
->scan
.scanned_ch
));
3165 wl
->scan_vif
= NULL
;
3166 wl
->scan
.req
= NULL
;
3167 ieee80211_scan_completed(wl
->hw
, true);
3170 wl1271_ps_elp_sleep(wl
);
3172 mutex_unlock(&wl
->mutex
);
3174 cancel_delayed_work_sync(&wl
->scan_complete_work
);
3177 static int wl1271_op_sched_scan_start(struct ieee80211_hw
*hw
,
3178 struct ieee80211_vif
*vif
,
3179 struct cfg80211_sched_scan_request
*req
,
3180 struct ieee80211_sched_scan_ies
*ies
)
3182 struct wl1271
*wl
= hw
->priv
;
3183 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3186 wl1271_debug(DEBUG_MAC80211
, "wl1271_op_sched_scan_start");
3188 mutex_lock(&wl
->mutex
);
3190 if (wl
->state
== WL1271_STATE_OFF
) {
3195 ret
= wl1271_ps_elp_wakeup(wl
);
3199 ret
= wl1271_scan_sched_scan_config(wl
, wlvif
, req
, ies
);
3203 ret
= wl1271_scan_sched_scan_start(wl
, wlvif
);
3207 wl
->sched_scanning
= true;
3210 wl1271_ps_elp_sleep(wl
);
3212 mutex_unlock(&wl
->mutex
);
3216 static void wl1271_op_sched_scan_stop(struct ieee80211_hw
*hw
,
3217 struct ieee80211_vif
*vif
)
3219 struct wl1271
*wl
= hw
->priv
;
3220 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3223 wl1271_debug(DEBUG_MAC80211
, "wl1271_op_sched_scan_stop");
3225 mutex_lock(&wl
->mutex
);
3227 if (wl
->state
== WL1271_STATE_OFF
)
3230 ret
= wl1271_ps_elp_wakeup(wl
);
3234 wl1271_scan_sched_scan_stop(wl
, wlvif
);
3236 wl1271_ps_elp_sleep(wl
);
3238 mutex_unlock(&wl
->mutex
);
3241 static int wl1271_op_set_frag_threshold(struct ieee80211_hw
*hw
, u32 value
)
3243 struct wl1271
*wl
= hw
->priv
;
3246 mutex_lock(&wl
->mutex
);
3248 if (unlikely(wl
->state
== WL1271_STATE_OFF
)) {
3253 ret
= wl1271_ps_elp_wakeup(wl
);
3257 ret
= wl1271_acx_frag_threshold(wl
, value
);
3259 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret
);
3261 wl1271_ps_elp_sleep(wl
);
3264 mutex_unlock(&wl
->mutex
);
3269 static int wl1271_op_set_rts_threshold(struct ieee80211_hw
*hw
, u32 value
)
3271 struct wl1271
*wl
= hw
->priv
;
3272 struct wl12xx_vif
*wlvif
;
3275 mutex_lock(&wl
->mutex
);
3277 if (unlikely(wl
->state
== WL1271_STATE_OFF
)) {
3282 ret
= wl1271_ps_elp_wakeup(wl
);
3286 wl12xx_for_each_wlvif(wl
, wlvif
) {
3287 ret
= wl1271_acx_rts_threshold(wl
, wlvif
, value
);
3289 wl1271_warning("set rts threshold failed: %d", ret
);
3291 wl1271_ps_elp_sleep(wl
);
3294 mutex_unlock(&wl
->mutex
);
3299 static int wl1271_ssid_set(struct ieee80211_vif
*vif
, struct sk_buff
*skb
,
3302 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3304 const u8
*ptr
= cfg80211_find_ie(WLAN_EID_SSID
, skb
->data
+ offset
,
3308 wl1271_error("No SSID in IEs!");
3313 if (ssid_len
> IEEE80211_MAX_SSID_LEN
) {
3314 wl1271_error("SSID is too long!");
3318 wlvif
->ssid_len
= ssid_len
;
3319 memcpy(wlvif
->ssid
, ptr
+2, ssid_len
);
3323 static void wl12xx_remove_ie(struct sk_buff
*skb
, u8 eid
, int ieoffset
)
3326 const u8
*next
, *end
= skb
->data
+ skb
->len
;
3327 u8
*ie
= (u8
*)cfg80211_find_ie(eid
, skb
->data
+ ieoffset
,
3328 skb
->len
- ieoffset
);
3333 memmove(ie
, next
, end
- next
);
3334 skb_trim(skb
, skb
->len
- len
);
3337 static void wl12xx_remove_vendor_ie(struct sk_buff
*skb
,
3338 unsigned int oui
, u8 oui_type
,
3342 const u8
*next
, *end
= skb
->data
+ skb
->len
;
3343 u8
*ie
= (u8
*)cfg80211_find_vendor_ie(oui
, oui_type
,
3344 skb
->data
+ ieoffset
,
3345 skb
->len
- ieoffset
);
3350 memmove(ie
, next
, end
- next
);
3351 skb_trim(skb
, skb
->len
- len
);
3354 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271
*wl
, u32 rates
,
3355 struct ieee80211_vif
*vif
)
3357 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3358 struct sk_buff
*skb
;
3361 skb
= ieee80211_proberesp_get(wl
->hw
, vif
);
3365 ret
= wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3366 CMD_TEMPL_AP_PROBE_RESPONSE
,
3375 wl1271_debug(DEBUG_AP
, "probe response updated");
3376 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET
, &wlvif
->flags
);
3382 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271
*wl
,
3383 struct ieee80211_vif
*vif
,
3385 size_t probe_rsp_len
,
3388 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3389 struct ieee80211_bss_conf
*bss_conf
= &vif
->bss_conf
;
3390 u8 probe_rsp_templ
[WL1271_CMD_TEMPL_MAX_SIZE
];
3391 int ssid_ie_offset
, ie_offset
, templ_len
;
3394 /* no need to change probe response if the SSID is set correctly */
3395 if (wlvif
->ssid_len
> 0)
3396 return wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3397 CMD_TEMPL_AP_PROBE_RESPONSE
,
3402 if (probe_rsp_len
+ bss_conf
->ssid_len
> WL1271_CMD_TEMPL_MAX_SIZE
) {
3403 wl1271_error("probe_rsp template too big");
3407 /* start searching from IE offset */
3408 ie_offset
= offsetof(struct ieee80211_mgmt
, u
.probe_resp
.variable
);
3410 ptr
= cfg80211_find_ie(WLAN_EID_SSID
, probe_rsp_data
+ ie_offset
,
3411 probe_rsp_len
- ie_offset
);
3413 wl1271_error("No SSID in beacon!");
3417 ssid_ie_offset
= ptr
- probe_rsp_data
;
3418 ptr
+= (ptr
[1] + 2);
3420 memcpy(probe_rsp_templ
, probe_rsp_data
, ssid_ie_offset
);
3422 /* insert SSID from bss_conf */
3423 probe_rsp_templ
[ssid_ie_offset
] = WLAN_EID_SSID
;
3424 probe_rsp_templ
[ssid_ie_offset
+ 1] = bss_conf
->ssid_len
;
3425 memcpy(probe_rsp_templ
+ ssid_ie_offset
+ 2,
3426 bss_conf
->ssid
, bss_conf
->ssid_len
);
3427 templ_len
= ssid_ie_offset
+ 2 + bss_conf
->ssid_len
;
3429 memcpy(probe_rsp_templ
+ ssid_ie_offset
+ 2 + bss_conf
->ssid_len
,
3430 ptr
, probe_rsp_len
- (ptr
- probe_rsp_data
));
3431 templ_len
+= probe_rsp_len
- (ptr
- probe_rsp_data
);
3433 return wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3434 CMD_TEMPL_AP_PROBE_RESPONSE
,
3440 static int wl1271_bss_erp_info_changed(struct wl1271
*wl
,
3441 struct ieee80211_vif
*vif
,
3442 struct ieee80211_bss_conf
*bss_conf
,
3445 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3448 if (changed
& BSS_CHANGED_ERP_SLOT
) {
3449 if (bss_conf
->use_short_slot
)
3450 ret
= wl1271_acx_slot(wl
, wlvif
, SLOT_TIME_SHORT
);
3452 ret
= wl1271_acx_slot(wl
, wlvif
, SLOT_TIME_LONG
);
3454 wl1271_warning("Set slot time failed %d", ret
);
3459 if (changed
& BSS_CHANGED_ERP_PREAMBLE
) {
3460 if (bss_conf
->use_short_preamble
)
3461 wl1271_acx_set_preamble(wl
, wlvif
, ACX_PREAMBLE_SHORT
);
3463 wl1271_acx_set_preamble(wl
, wlvif
, ACX_PREAMBLE_LONG
);
3466 if (changed
& BSS_CHANGED_ERP_CTS_PROT
) {
3467 if (bss_conf
->use_cts_prot
)
3468 ret
= wl1271_acx_cts_protect(wl
, wlvif
,
3471 ret
= wl1271_acx_cts_protect(wl
, wlvif
,
3472 CTSPROTECT_DISABLE
);
3474 wl1271_warning("Set ctsprotect failed %d", ret
);
3483 static int wlcore_set_beacon_template(struct wl1271
*wl
,
3484 struct ieee80211_vif
*vif
,
3487 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3488 struct ieee80211_hdr
*hdr
;
3491 int ieoffset
= offsetof(struct ieee80211_mgmt
,
3493 struct sk_buff
*beacon
= ieee80211_beacon_get(wl
->hw
, vif
);
3501 wl1271_debug(DEBUG_MASTER
, "beacon updated");
3503 ret
= wl1271_ssid_set(vif
, beacon
, ieoffset
);
3505 dev_kfree_skb(beacon
);
3508 min_rate
= wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
3509 tmpl_id
= is_ap
? CMD_TEMPL_AP_BEACON
:
3511 ret
= wl1271_cmd_template_set(wl
, wlvif
->role_id
, tmpl_id
,
3516 dev_kfree_skb(beacon
);
3521 * In case we already have a probe-resp beacon set explicitly
3522 * by usermode, don't use the beacon data.
3524 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET
, &wlvif
->flags
))
3527 /* remove TIM ie from probe response */
3528 wl12xx_remove_ie(beacon
, WLAN_EID_TIM
, ieoffset
);
3531 * remove p2p ie from probe response.
3532 * the fw reponds to probe requests that don't include
3533 * the p2p ie. probe requests with p2p ie will be passed,
3534 * and will be responded by the supplicant (the spec
3535 * forbids including the p2p ie when responding to probe
3536 * requests that didn't include it).
3538 wl12xx_remove_vendor_ie(beacon
, WLAN_OUI_WFA
,
3539 WLAN_OUI_TYPE_WFA_P2P
, ieoffset
);
3541 hdr
= (struct ieee80211_hdr
*) beacon
->data
;
3542 hdr
->frame_control
= cpu_to_le16(IEEE80211_FTYPE_MGMT
|
3543 IEEE80211_STYPE_PROBE_RESP
);
3545 ret
= wl1271_ap_set_probe_resp_tmpl_legacy(wl
, vif
,
3550 ret
= wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3551 CMD_TEMPL_PROBE_RESPONSE
,
3556 dev_kfree_skb(beacon
);
3564 static int wl1271_bss_beacon_info_changed(struct wl1271
*wl
,
3565 struct ieee80211_vif
*vif
,
3566 struct ieee80211_bss_conf
*bss_conf
,
3569 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3570 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
3573 if ((changed
& BSS_CHANGED_BEACON_INT
)) {
3574 wl1271_debug(DEBUG_MASTER
, "beacon interval updated: %d",
3575 bss_conf
->beacon_int
);
3577 wlvif
->beacon_int
= bss_conf
->beacon_int
;
3580 if ((changed
& BSS_CHANGED_AP_PROBE_RESP
) && is_ap
) {
3581 u32 rate
= wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
3583 wl1271_ap_set_probe_resp_tmpl(wl
, rate
, vif
);
3586 if ((changed
& BSS_CHANGED_BEACON
)) {
3587 ret
= wlcore_set_beacon_template(wl
, vif
, is_ap
);
3594 wl1271_error("beacon info change failed: %d", ret
);
3598 /* AP mode changes */
3599 static void wl1271_bss_info_changed_ap(struct wl1271
*wl
,
3600 struct ieee80211_vif
*vif
,
3601 struct ieee80211_bss_conf
*bss_conf
,
3604 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3607 if ((changed
& BSS_CHANGED_BASIC_RATES
)) {
3608 u32 rates
= bss_conf
->basic_rates
;
3610 wlvif
->basic_rate_set
= wl1271_tx_enabled_rates_get(wl
, rates
,
3612 wlvif
->basic_rate
= wl1271_tx_min_rate_get(wl
,
3613 wlvif
->basic_rate_set
);
3615 ret
= wl1271_init_ap_rates(wl
, wlvif
);
3617 wl1271_error("AP rate policy change failed %d", ret
);
3621 ret
= wl1271_ap_init_templates(wl
, vif
);
3625 ret
= wl1271_ap_set_probe_resp_tmpl(wl
, wlvif
->basic_rate
, vif
);
3629 ret
= wlcore_set_beacon_template(wl
, vif
, true);
3634 ret
= wl1271_bss_beacon_info_changed(wl
, vif
, bss_conf
, changed
);
3638 if ((changed
& BSS_CHANGED_BEACON_ENABLED
)) {
3639 if (bss_conf
->enable_beacon
) {
3640 if (!test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
)) {
3641 ret
= wl12xx_cmd_role_start_ap(wl
, wlvif
);
3645 ret
= wl1271_ap_init_hwenc(wl
, wlvif
);
3649 set_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
);
3650 wl1271_debug(DEBUG_AP
, "started AP");
3653 if (test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
)) {
3654 ret
= wl12xx_cmd_role_stop_ap(wl
, wlvif
);
3658 clear_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
);
3659 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET
,
3661 wl1271_debug(DEBUG_AP
, "stopped AP");
3666 ret
= wl1271_bss_erp_info_changed(wl
, vif
, bss_conf
, changed
);
3670 /* Handle HT information change */
3671 if ((changed
& BSS_CHANGED_HT
) &&
3672 (bss_conf
->channel_type
!= NL80211_CHAN_NO_HT
)) {
3673 ret
= wl1271_acx_set_ht_information(wl
, wlvif
,
3674 bss_conf
->ht_operation_mode
);
3676 wl1271_warning("Set ht information failed %d", ret
);
3685 /* STA/IBSS mode changes */
3686 static void wl1271_bss_info_changed_sta(struct wl1271
*wl
,
3687 struct ieee80211_vif
*vif
,
3688 struct ieee80211_bss_conf
*bss_conf
,
3691 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3692 bool do_join
= false, set_assoc
= false;
3693 bool is_ibss
= (wlvif
->bss_type
== BSS_TYPE_IBSS
);
3694 bool ibss_joined
= false;
3695 u32 sta_rate_set
= 0;
3697 struct ieee80211_sta
*sta
;
3698 bool sta_exists
= false;
3699 struct ieee80211_sta_ht_cap sta_ht_cap
;
3702 ret
= wl1271_bss_beacon_info_changed(wl
, vif
, bss_conf
,
3708 if (changed
& BSS_CHANGED_IBSS
) {
3709 if (bss_conf
->ibss_joined
) {
3710 set_bit(WLVIF_FLAG_IBSS_JOINED
, &wlvif
->flags
);
3713 if (test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED
,
3715 wl1271_unjoin(wl
, wlvif
);
3719 if ((changed
& BSS_CHANGED_BEACON_INT
) && ibss_joined
)
3722 /* Need to update the SSID (for filtering etc) */
3723 if ((changed
& BSS_CHANGED_BEACON
) && ibss_joined
)
3726 if ((changed
& BSS_CHANGED_BEACON_ENABLED
) && ibss_joined
) {
3727 wl1271_debug(DEBUG_ADHOC
, "ad-hoc beaconing: %s",
3728 bss_conf
->enable_beacon
? "enabled" : "disabled");
3733 if (changed
& BSS_CHANGED_IDLE
&& !is_ibss
) {
3734 ret
= wl1271_sta_handle_idle(wl
, wlvif
, bss_conf
->idle
);
3736 wl1271_warning("idle mode change failed %d", ret
);
3739 if ((changed
& BSS_CHANGED_CQM
)) {
3740 bool enable
= false;
3741 if (bss_conf
->cqm_rssi_thold
)
3743 ret
= wl1271_acx_rssi_snr_trigger(wl
, wlvif
, enable
,
3744 bss_conf
->cqm_rssi_thold
,
3745 bss_conf
->cqm_rssi_hyst
);
3748 wlvif
->rssi_thold
= bss_conf
->cqm_rssi_thold
;
3751 if (changed
& BSS_CHANGED_BSSID
)
3752 if (!is_zero_ether_addr(bss_conf
->bssid
)) {
3753 ret
= wl12xx_cmd_build_null_data(wl
, wlvif
);
3757 ret
= wl1271_build_qos_null_data(wl
, vif
);
3762 if (changed
& (BSS_CHANGED_ASSOC
| BSS_CHANGED_HT
)) {
3764 sta
= ieee80211_find_sta(vif
, bss_conf
->bssid
);
3768 /* save the supp_rates of the ap */
3769 sta_rate_set
= sta
->supp_rates
[wl
->hw
->conf
.channel
->band
];
3770 if (sta
->ht_cap
.ht_supported
)
3772 (sta
->ht_cap
.mcs
.rx_mask
[0] << HW_HT_RATES_OFFSET
) |
3773 (sta
->ht_cap
.mcs
.rx_mask
[1] << HW_MIMO_RATES_OFFSET
);
3774 sta_ht_cap
= sta
->ht_cap
;
3781 if ((changed
& BSS_CHANGED_ASSOC
)) {
3782 if (bss_conf
->assoc
) {
3785 wlvif
->aid
= bss_conf
->aid
;
3786 wlvif
->channel_type
= bss_conf
->channel_type
;
3787 wlvif
->beacon_int
= bss_conf
->beacon_int
;
3792 * use basic rates from AP, and determine lowest rate
3793 * to use with control frames.
3795 rates
= bss_conf
->basic_rates
;
3796 wlvif
->basic_rate_set
=
3797 wl1271_tx_enabled_rates_get(wl
, rates
,
3800 wl1271_tx_min_rate_get(wl
,
3801 wlvif
->basic_rate_set
);
3804 wl1271_tx_enabled_rates_get(wl
,
3807 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
3812 * with wl1271, we don't need to update the
3813 * beacon_int and dtim_period, because the firmware
3814 * updates it by itself when the first beacon is
3815 * received after a join.
3817 ret
= wl1271_cmd_build_ps_poll(wl
, wlvif
, wlvif
->aid
);
3822 * Get a template for hardware connection maintenance
3824 dev_kfree_skb(wlvif
->probereq
);
3825 wlvif
->probereq
= wl1271_cmd_build_ap_probe_req(wl
,
3828 ieoffset
= offsetof(struct ieee80211_mgmt
,
3829 u
.probe_req
.variable
);
3830 wl1271_ssid_set(vif
, wlvif
->probereq
, ieoffset
);
3832 /* enable the connection monitoring feature */
3833 ret
= wl1271_acx_conn_monit_params(wl
, wlvif
, true);
3837 /* use defaults when not associated */
3839 !!test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED
,
3842 !!test_and_clear_bit(WLVIF_FLAG_STA_STATE_SENT
,
3846 /* free probe-request template */
3847 dev_kfree_skb(wlvif
->probereq
);
3848 wlvif
->probereq
= NULL
;
3850 /* revert back to minimum rates for the current band */
3851 wl1271_set_band_rate(wl
, wlvif
);
3853 wl1271_tx_min_rate_get(wl
,
3854 wlvif
->basic_rate_set
);
3855 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
3859 /* disable connection monitor features */
3860 ret
= wl1271_acx_conn_monit_params(wl
, wlvif
, false);
3862 /* Disable the keep-alive feature */
3863 ret
= wl1271_acx_keep_alive_mode(wl
, wlvif
, false);
3867 /* restore the bssid filter and go to dummy bssid */
3870 * we might have to disable roc, if there was
3871 * no IF_OPER_UP notification.
3874 ret
= wl12xx_croc(wl
, wlvif
->role_id
);
3879 * (we also need to disable roc in case of
3880 * roaming on the same channel. until we will
3881 * have a better flow...)
3883 if (test_bit(wlvif
->dev_role_id
, wl
->roc_map
)) {
3884 ret
= wl12xx_croc(wl
,
3885 wlvif
->dev_role_id
);
3890 wl1271_unjoin(wl
, wlvif
);
3891 if (!bss_conf
->idle
)
3892 wl12xx_start_dev(wl
, wlvif
);
3897 if (changed
& BSS_CHANGED_IBSS
) {
3898 wl1271_debug(DEBUG_ADHOC
, "ibss_joined: %d",
3899 bss_conf
->ibss_joined
);
3901 if (bss_conf
->ibss_joined
) {
3902 u32 rates
= bss_conf
->basic_rates
;
3903 wlvif
->basic_rate_set
=
3904 wl1271_tx_enabled_rates_get(wl
, rates
,
3907 wl1271_tx_min_rate_get(wl
,
3908 wlvif
->basic_rate_set
);
3910 /* by default, use 11b + OFDM rates */
3911 wlvif
->rate_set
= CONF_TX_IBSS_DEFAULT_RATES
;
3912 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
3918 ret
= wl1271_bss_erp_info_changed(wl
, vif
, bss_conf
, changed
);
3923 ret
= wl1271_join(wl
, wlvif
, set_assoc
);
3925 wl1271_warning("cmd join failed %d", ret
);
3929 /* ROC until connected (after EAPOL exchange) */
3931 ret
= wl12xx_roc(wl
, wlvif
, wlvif
->role_id
);
3935 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
))
3936 wl12xx_set_authorized(wl
, wlvif
);
3939 * stop device role if started (we might already be in
3942 if (wl12xx_dev_role_started(wlvif
)) {
3943 ret
= wl12xx_stop_dev(wl
, wlvif
);
3949 /* Handle new association with HT. Do this after join. */
3951 if ((changed
& BSS_CHANGED_HT
) &&
3952 (bss_conf
->channel_type
!= NL80211_CHAN_NO_HT
)) {
3953 ret
= wl1271_acx_set_ht_capabilities(wl
,
3958 wl1271_warning("Set ht cap true failed %d",
3963 /* handle new association without HT and disassociation */
3964 else if (changed
& BSS_CHANGED_ASSOC
) {
3965 ret
= wl1271_acx_set_ht_capabilities(wl
,
3970 wl1271_warning("Set ht cap false failed %d",
3977 /* Handle HT information change. Done after join. */
3978 if ((changed
& BSS_CHANGED_HT
) &&
3979 (bss_conf
->channel_type
!= NL80211_CHAN_NO_HT
)) {
3980 ret
= wl1271_acx_set_ht_information(wl
, wlvif
,
3981 bss_conf
->ht_operation_mode
);
3983 wl1271_warning("Set ht information failed %d", ret
);
3988 /* Handle arp filtering. Done after join. */
3989 if ((changed
& BSS_CHANGED_ARP_FILTER
) ||
3990 (!is_ibss
&& (changed
& BSS_CHANGED_QOS
))) {
3991 __be32 addr
= bss_conf
->arp_addr_list
[0];
3992 wlvif
->sta
.qos
= bss_conf
->qos
;
3993 WARN_ON(wlvif
->bss_type
!= BSS_TYPE_STA_BSS
);
3995 if (bss_conf
->arp_addr_cnt
== 1 &&
3996 bss_conf
->arp_filter_enabled
) {
3997 wlvif
->ip_addr
= addr
;
3999 * The template should have been configured only upon
4000 * association. however, it seems that the correct ip
4001 * isn't being set (when sending), so we have to
4002 * reconfigure the template upon every ip change.
4004 ret
= wl1271_cmd_build_arp_rsp(wl
, wlvif
);
4006 wl1271_warning("build arp rsp failed: %d", ret
);
4010 ret
= wl1271_acx_arp_ip_filter(wl
, wlvif
,
4011 (ACX_ARP_FILTER_ARP_FILTERING
|
4012 ACX_ARP_FILTER_AUTO_ARP
),
4016 ret
= wl1271_acx_arp_ip_filter(wl
, wlvif
, 0, addr
);
4027 static void wl1271_op_bss_info_changed(struct ieee80211_hw
*hw
,
4028 struct ieee80211_vif
*vif
,
4029 struct ieee80211_bss_conf
*bss_conf
,
4032 struct wl1271
*wl
= hw
->priv
;
4033 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4034 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
4037 wl1271_debug(DEBUG_MAC80211
, "mac80211 bss info changed 0x%x",
4041 * make sure to cancel pending disconnections if our association
4044 if (!is_ap
&& (changed
& BSS_CHANGED_ASSOC
))
4045 cancel_delayed_work_sync(&wl
->connection_loss_work
);
4047 if (is_ap
&& (changed
& BSS_CHANGED_BEACON_ENABLED
) &&
4048 !bss_conf
->enable_beacon
)
4049 wl1271_tx_flush(wl
);
4051 mutex_lock(&wl
->mutex
);
4053 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
4056 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
)))
4059 ret
= wl1271_ps_elp_wakeup(wl
);
4064 wl1271_bss_info_changed_ap(wl
, vif
, bss_conf
, changed
);
4066 wl1271_bss_info_changed_sta(wl
, vif
, bss_conf
, changed
);
4068 wl1271_ps_elp_sleep(wl
);
4071 mutex_unlock(&wl
->mutex
);
4074 static int wl1271_op_conf_tx(struct ieee80211_hw
*hw
,
4075 struct ieee80211_vif
*vif
, u16 queue
,
4076 const struct ieee80211_tx_queue_params
*params
)
4078 struct wl1271
*wl
= hw
->priv
;
4079 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4083 mutex_lock(&wl
->mutex
);
4085 wl1271_debug(DEBUG_MAC80211
, "mac80211 conf tx %d", queue
);
4088 ps_scheme
= CONF_PS_SCHEME_UPSD_TRIGGER
;
4090 ps_scheme
= CONF_PS_SCHEME_LEGACY
;
4092 if (!test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
4095 ret
= wl1271_ps_elp_wakeup(wl
);
4100 * the txop is confed in units of 32us by the mac80211,
4103 ret
= wl1271_acx_ac_cfg(wl
, wlvif
, wl1271_tx_get_queue(queue
),
4104 params
->cw_min
, params
->cw_max
,
4105 params
->aifs
, params
->txop
<< 5);
4109 ret
= wl1271_acx_tid_cfg(wl
, wlvif
, wl1271_tx_get_queue(queue
),
4110 CONF_CHANNEL_TYPE_EDCF
,
4111 wl1271_tx_get_queue(queue
),
4112 ps_scheme
, CONF_ACK_POLICY_LEGACY
,
4116 wl1271_ps_elp_sleep(wl
);
4119 mutex_unlock(&wl
->mutex
);
4124 static u64
wl1271_op_get_tsf(struct ieee80211_hw
*hw
,
4125 struct ieee80211_vif
*vif
)
4128 struct wl1271
*wl
= hw
->priv
;
4129 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4130 u64 mactime
= ULLONG_MAX
;
4133 wl1271_debug(DEBUG_MAC80211
, "mac80211 get tsf");
4135 mutex_lock(&wl
->mutex
);
4137 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
4140 ret
= wl1271_ps_elp_wakeup(wl
);
4144 ret
= wl12xx_acx_tsf_info(wl
, wlvif
, &mactime
);
4149 wl1271_ps_elp_sleep(wl
);
4152 mutex_unlock(&wl
->mutex
);
4156 static int wl1271_op_get_survey(struct ieee80211_hw
*hw
, int idx
,
4157 struct survey_info
*survey
)
4159 struct ieee80211_conf
*conf
= &hw
->conf
;
4164 survey
->channel
= conf
->channel
;
4169 static int wl1271_allocate_sta(struct wl1271
*wl
,
4170 struct wl12xx_vif
*wlvif
,
4171 struct ieee80211_sta
*sta
)
4173 struct wl1271_station
*wl_sta
;
4177 if (wl
->active_sta_count
>= AP_MAX_STATIONS
) {
4178 wl1271_warning("could not allocate HLID - too much stations");
4182 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4183 ret
= wl12xx_allocate_link(wl
, wlvif
, &wl_sta
->hlid
);
4185 wl1271_warning("could not allocate HLID - too many links");
4189 set_bit(wl_sta
->hlid
, wlvif
->ap
.sta_hlid_map
);
4190 memcpy(wl
->links
[wl_sta
->hlid
].addr
, sta
->addr
, ETH_ALEN
);
4191 wl
->active_sta_count
++;
4195 void wl1271_free_sta(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
, u8 hlid
)
4197 if (!test_bit(hlid
, wlvif
->ap
.sta_hlid_map
))
4200 clear_bit(hlid
, wlvif
->ap
.sta_hlid_map
);
4201 memset(wl
->links
[hlid
].addr
, 0, ETH_ALEN
);
4202 wl
->links
[hlid
].ba_bitmap
= 0;
4203 __clear_bit(hlid
, &wl
->ap_ps_map
);
4204 __clear_bit(hlid
, (unsigned long *)&wl
->ap_fw_ps_map
);
4205 wl12xx_free_link(wl
, wlvif
, &hlid
);
4206 wl
->active_sta_count
--;
4209 * rearm the tx watchdog when the last STA is freed - give the FW a
4210 * chance to return STA-buffered packets before complaining.
4212 if (wl
->active_sta_count
== 0)
4213 wl12xx_rearm_tx_watchdog_locked(wl
);
4216 static int wl12xx_sta_add(struct wl1271
*wl
,
4217 struct wl12xx_vif
*wlvif
,
4218 struct ieee80211_sta
*sta
)
4220 struct wl1271_station
*wl_sta
;
4224 wl1271_debug(DEBUG_MAC80211
, "mac80211 add sta %d", (int)sta
->aid
);
4226 ret
= wl1271_allocate_sta(wl
, wlvif
, sta
);
4230 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4231 hlid
= wl_sta
->hlid
;
4233 ret
= wl12xx_cmd_add_peer(wl
, wlvif
, sta
, hlid
);
4235 wl1271_free_sta(wl
, wlvif
, hlid
);
4240 static int wl12xx_sta_remove(struct wl1271
*wl
,
4241 struct wl12xx_vif
*wlvif
,
4242 struct ieee80211_sta
*sta
)
4244 struct wl1271_station
*wl_sta
;
4247 wl1271_debug(DEBUG_MAC80211
, "mac80211 remove sta %d", (int)sta
->aid
);
4249 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4251 if (WARN_ON(!test_bit(id
, wlvif
->ap
.sta_hlid_map
)))
4254 ret
= wl12xx_cmd_remove_peer(wl
, wl_sta
->hlid
);
4258 wl1271_free_sta(wl
, wlvif
, wl_sta
->hlid
);
4262 static int wl12xx_update_sta_state(struct wl1271
*wl
,
4263 struct wl12xx_vif
*wlvif
,
4264 struct ieee80211_sta
*sta
,
4265 enum ieee80211_sta_state old_state
,
4266 enum ieee80211_sta_state new_state
)
4268 struct wl1271_station
*wl_sta
;
4270 bool is_ap
= wlvif
->bss_type
== BSS_TYPE_AP_BSS
;
4271 bool is_sta
= wlvif
->bss_type
== BSS_TYPE_STA_BSS
;
4274 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4275 hlid
= wl_sta
->hlid
;
4277 /* Add station (AP mode) */
4279 old_state
== IEEE80211_STA_NOTEXIST
&&
4280 new_state
== IEEE80211_STA_NONE
)
4281 return wl12xx_sta_add(wl
, wlvif
, sta
);
4283 /* Remove station (AP mode) */
4285 old_state
== IEEE80211_STA_NONE
&&
4286 new_state
== IEEE80211_STA_NOTEXIST
) {
4288 wl12xx_sta_remove(wl
, wlvif
, sta
);
4292 /* Authorize station (AP mode) */
4294 new_state
== IEEE80211_STA_AUTHORIZED
) {
4295 ret
= wl12xx_cmd_set_peer_state(wl
, hlid
);
4299 ret
= wl1271_acx_set_ht_capabilities(wl
, &sta
->ht_cap
, true,
4304 /* Authorize station */
4306 new_state
== IEEE80211_STA_AUTHORIZED
) {
4307 set_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
);
4308 return wl12xx_set_authorized(wl
, wlvif
);
4312 old_state
== IEEE80211_STA_AUTHORIZED
&&
4313 new_state
== IEEE80211_STA_ASSOC
) {
4314 clear_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
);
4321 static int wl12xx_op_sta_state(struct ieee80211_hw
*hw
,
4322 struct ieee80211_vif
*vif
,
4323 struct ieee80211_sta
*sta
,
4324 enum ieee80211_sta_state old_state
,
4325 enum ieee80211_sta_state new_state
)
4327 struct wl1271
*wl
= hw
->priv
;
4328 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4331 wl1271_debug(DEBUG_MAC80211
, "mac80211 sta %d state=%d->%d",
4332 sta
->aid
, old_state
, new_state
);
4334 mutex_lock(&wl
->mutex
);
4336 if (unlikely(wl
->state
== WL1271_STATE_OFF
)) {
4341 ret
= wl1271_ps_elp_wakeup(wl
);
4345 ret
= wl12xx_update_sta_state(wl
, wlvif
, sta
, old_state
, new_state
);
4347 wl1271_ps_elp_sleep(wl
);
4349 mutex_unlock(&wl
->mutex
);
4350 if (new_state
< old_state
)
4355 static int wl1271_op_ampdu_action(struct ieee80211_hw
*hw
,
4356 struct ieee80211_vif
*vif
,
4357 enum ieee80211_ampdu_mlme_action action
,
4358 struct ieee80211_sta
*sta
, u16 tid
, u16
*ssn
,
4361 struct wl1271
*wl
= hw
->priv
;
4362 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4364 u8 hlid
, *ba_bitmap
;
4366 wl1271_debug(DEBUG_MAC80211
, "mac80211 ampdu action %d tid %d", action
,
4369 /* sanity check - the fields in FW are only 8bits wide */
4370 if (WARN_ON(tid
> 0xFF))
4373 mutex_lock(&wl
->mutex
);
4375 if (unlikely(wl
->state
== WL1271_STATE_OFF
)) {
4380 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
) {
4381 hlid
= wlvif
->sta
.hlid
;
4382 ba_bitmap
= &wlvif
->sta
.ba_rx_bitmap
;
4383 } else if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
) {
4384 struct wl1271_station
*wl_sta
;
4386 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4387 hlid
= wl_sta
->hlid
;
4388 ba_bitmap
= &wl
->links
[hlid
].ba_bitmap
;
4394 ret
= wl1271_ps_elp_wakeup(wl
);
4398 wl1271_debug(DEBUG_MAC80211
, "mac80211 ampdu: Rx tid %d action %d",
4402 case IEEE80211_AMPDU_RX_START
:
4403 if (!wlvif
->ba_support
|| !wlvif
->ba_allowed
) {
4408 if (wl
->ba_rx_session_count
>= RX_BA_MAX_SESSIONS
) {
4410 wl1271_error("exceeded max RX BA sessions");
4414 if (*ba_bitmap
& BIT(tid
)) {
4416 wl1271_error("cannot enable RX BA session on active "
4421 ret
= wl12xx_acx_set_ba_receiver_session(wl
, tid
, *ssn
, true,
4424 *ba_bitmap
|= BIT(tid
);
4425 wl
->ba_rx_session_count
++;
4429 case IEEE80211_AMPDU_RX_STOP
:
4430 if (!(*ba_bitmap
& BIT(tid
))) {
4432 * this happens on reconfig - so only output a debug
4433 * message for now, and don't fail the function.
4435 wl1271_debug(DEBUG_MAC80211
,
4436 "no active RX BA session on tid: %d",
4442 ret
= wl12xx_acx_set_ba_receiver_session(wl
, tid
, 0, false,
4445 *ba_bitmap
&= ~BIT(tid
);
4446 wl
->ba_rx_session_count
--;
4451 * The BA initiator session management in FW independently.
4452 * Falling break here on purpose for all TX APDU commands.
4454 case IEEE80211_AMPDU_TX_START
:
4455 case IEEE80211_AMPDU_TX_STOP
:
4456 case IEEE80211_AMPDU_TX_OPERATIONAL
:
4461 wl1271_error("Incorrect ampdu action id=%x\n", action
);
4465 wl1271_ps_elp_sleep(wl
);
4468 mutex_unlock(&wl
->mutex
);
4473 static int wl12xx_set_bitrate_mask(struct ieee80211_hw
*hw
,
4474 struct ieee80211_vif
*vif
,
4475 const struct cfg80211_bitrate_mask
*mask
)
4477 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4478 struct wl1271
*wl
= hw
->priv
;
4481 wl1271_debug(DEBUG_MAC80211
, "mac80211 set_bitrate_mask 0x%x 0x%x",
4482 mask
->control
[NL80211_BAND_2GHZ
].legacy
,
4483 mask
->control
[NL80211_BAND_5GHZ
].legacy
);
4485 mutex_lock(&wl
->mutex
);
4487 for (i
= 0; i
< IEEE80211_NUM_BANDS
; i
++)
4488 wlvif
->bitrate_masks
[i
] =
4489 wl1271_tx_enabled_rates_get(wl
,
4490 mask
->control
[i
].legacy
,
4493 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
4496 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
&&
4497 !test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
)) {
4499 ret
= wl1271_ps_elp_wakeup(wl
);
4503 wl1271_set_band_rate(wl
, wlvif
);
4505 wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
4506 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
4508 wl1271_ps_elp_sleep(wl
);
4511 mutex_unlock(&wl
->mutex
);
4516 static void wl12xx_op_channel_switch(struct ieee80211_hw
*hw
,
4517 struct ieee80211_channel_switch
*ch_switch
)
4519 struct wl1271
*wl
= hw
->priv
;
4520 struct wl12xx_vif
*wlvif
;
4523 wl1271_debug(DEBUG_MAC80211
, "mac80211 channel switch");
4525 wl1271_tx_flush(wl
);
4527 mutex_lock(&wl
->mutex
);
4529 if (unlikely(wl
->state
== WL1271_STATE_OFF
)) {
4530 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
4531 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
4532 ieee80211_chswitch_done(vif
, false);
4537 ret
= wl1271_ps_elp_wakeup(wl
);
4541 /* TODO: change mac80211 to pass vif as param */
4542 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
4543 ret
= wl12xx_cmd_channel_switch(wl
, wlvif
, ch_switch
);
4546 set_bit(WLVIF_FLAG_CS_PROGRESS
, &wlvif
->flags
);
4549 wl1271_ps_elp_sleep(wl
);
4552 mutex_unlock(&wl
->mutex
);
4555 static bool wl1271_tx_frames_pending(struct ieee80211_hw
*hw
)
4557 struct wl1271
*wl
= hw
->priv
;
4560 mutex_lock(&wl
->mutex
);
4562 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
4565 /* packets are considered pending if in the TX queue or the FW */
4566 ret
= (wl1271_tx_total_queue_count(wl
) > 0) || (wl
->tx_frames_cnt
> 0);
4568 mutex_unlock(&wl
->mutex
);
4573 /* can't be const, mac80211 writes to this */
4574 static struct ieee80211_rate wl1271_rates
[] = {
4576 .hw_value
= CONF_HW_BIT_RATE_1MBPS
,
4577 .hw_value_short
= CONF_HW_BIT_RATE_1MBPS
, },
4579 .hw_value
= CONF_HW_BIT_RATE_2MBPS
,
4580 .hw_value_short
= CONF_HW_BIT_RATE_2MBPS
,
4581 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
4583 .hw_value
= CONF_HW_BIT_RATE_5_5MBPS
,
4584 .hw_value_short
= CONF_HW_BIT_RATE_5_5MBPS
,
4585 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
4587 .hw_value
= CONF_HW_BIT_RATE_11MBPS
,
4588 .hw_value_short
= CONF_HW_BIT_RATE_11MBPS
,
4589 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
4591 .hw_value
= CONF_HW_BIT_RATE_6MBPS
,
4592 .hw_value_short
= CONF_HW_BIT_RATE_6MBPS
, },
4594 .hw_value
= CONF_HW_BIT_RATE_9MBPS
,
4595 .hw_value_short
= CONF_HW_BIT_RATE_9MBPS
, },
4597 .hw_value
= CONF_HW_BIT_RATE_12MBPS
,
4598 .hw_value_short
= CONF_HW_BIT_RATE_12MBPS
, },
4600 .hw_value
= CONF_HW_BIT_RATE_18MBPS
,
4601 .hw_value_short
= CONF_HW_BIT_RATE_18MBPS
, },
4603 .hw_value
= CONF_HW_BIT_RATE_24MBPS
,
4604 .hw_value_short
= CONF_HW_BIT_RATE_24MBPS
, },
4606 .hw_value
= CONF_HW_BIT_RATE_36MBPS
,
4607 .hw_value_short
= CONF_HW_BIT_RATE_36MBPS
, },
4609 .hw_value
= CONF_HW_BIT_RATE_48MBPS
,
4610 .hw_value_short
= CONF_HW_BIT_RATE_48MBPS
, },
4612 .hw_value
= CONF_HW_BIT_RATE_54MBPS
,
4613 .hw_value_short
= CONF_HW_BIT_RATE_54MBPS
, },
4616 /* can't be const, mac80211 writes to this */
4617 static struct ieee80211_channel wl1271_channels
[] = {
4618 { .hw_value
= 1, .center_freq
= 2412, .max_power
= 25 },
4619 { .hw_value
= 2, .center_freq
= 2417, .max_power
= 25 },
4620 { .hw_value
= 3, .center_freq
= 2422, .max_power
= 25 },
4621 { .hw_value
= 4, .center_freq
= 2427, .max_power
= 25 },
4622 { .hw_value
= 5, .center_freq
= 2432, .max_power
= 25 },
4623 { .hw_value
= 6, .center_freq
= 2437, .max_power
= 25 },
4624 { .hw_value
= 7, .center_freq
= 2442, .max_power
= 25 },
4625 { .hw_value
= 8, .center_freq
= 2447, .max_power
= 25 },
4626 { .hw_value
= 9, .center_freq
= 2452, .max_power
= 25 },
4627 { .hw_value
= 10, .center_freq
= 2457, .max_power
= 25 },
4628 { .hw_value
= 11, .center_freq
= 2462, .max_power
= 25 },
4629 { .hw_value
= 12, .center_freq
= 2467, .max_power
= 25 },
4630 { .hw_value
= 13, .center_freq
= 2472, .max_power
= 25 },
4631 { .hw_value
= 14, .center_freq
= 2484, .max_power
= 25 },
4634 /* can't be const, mac80211 writes to this */
4635 static struct ieee80211_supported_band wl1271_band_2ghz
= {
4636 .channels
= wl1271_channels
,
4637 .n_channels
= ARRAY_SIZE(wl1271_channels
),
4638 .bitrates
= wl1271_rates
,
4639 .n_bitrates
= ARRAY_SIZE(wl1271_rates
),
4642 /* 5 GHz data rates for WL1273 */
4643 static struct ieee80211_rate wl1271_rates_5ghz
[] = {
4645 .hw_value
= CONF_HW_BIT_RATE_6MBPS
,
4646 .hw_value_short
= CONF_HW_BIT_RATE_6MBPS
, },
4648 .hw_value
= CONF_HW_BIT_RATE_9MBPS
,
4649 .hw_value_short
= CONF_HW_BIT_RATE_9MBPS
, },
4651 .hw_value
= CONF_HW_BIT_RATE_12MBPS
,
4652 .hw_value_short
= CONF_HW_BIT_RATE_12MBPS
, },
4654 .hw_value
= CONF_HW_BIT_RATE_18MBPS
,
4655 .hw_value_short
= CONF_HW_BIT_RATE_18MBPS
, },
4657 .hw_value
= CONF_HW_BIT_RATE_24MBPS
,
4658 .hw_value_short
= CONF_HW_BIT_RATE_24MBPS
, },
4660 .hw_value
= CONF_HW_BIT_RATE_36MBPS
,
4661 .hw_value_short
= CONF_HW_BIT_RATE_36MBPS
, },
4663 .hw_value
= CONF_HW_BIT_RATE_48MBPS
,
4664 .hw_value_short
= CONF_HW_BIT_RATE_48MBPS
, },
4666 .hw_value
= CONF_HW_BIT_RATE_54MBPS
,
4667 .hw_value_short
= CONF_HW_BIT_RATE_54MBPS
, },
4670 /* 5 GHz band channels for WL1273 */
4671 static struct ieee80211_channel wl1271_channels_5ghz
[] = {
4672 { .hw_value
= 7, .center_freq
= 5035, .max_power
= 25 },
4673 { .hw_value
= 8, .center_freq
= 5040, .max_power
= 25 },
4674 { .hw_value
= 9, .center_freq
= 5045, .max_power
= 25 },
4675 { .hw_value
= 11, .center_freq
= 5055, .max_power
= 25 },
4676 { .hw_value
= 12, .center_freq
= 5060, .max_power
= 25 },
4677 { .hw_value
= 16, .center_freq
= 5080, .max_power
= 25 },
4678 { .hw_value
= 34, .center_freq
= 5170, .max_power
= 25 },
4679 { .hw_value
= 36, .center_freq
= 5180, .max_power
= 25 },
4680 { .hw_value
= 38, .center_freq
= 5190, .max_power
= 25 },
4681 { .hw_value
= 40, .center_freq
= 5200, .max_power
= 25 },
4682 { .hw_value
= 42, .center_freq
= 5210, .max_power
= 25 },
4683 { .hw_value
= 44, .center_freq
= 5220, .max_power
= 25 },
4684 { .hw_value
= 46, .center_freq
= 5230, .max_power
= 25 },
4685 { .hw_value
= 48, .center_freq
= 5240, .max_power
= 25 },
4686 { .hw_value
= 52, .center_freq
= 5260, .max_power
= 25 },
4687 { .hw_value
= 56, .center_freq
= 5280, .max_power
= 25 },
4688 { .hw_value
= 60, .center_freq
= 5300, .max_power
= 25 },
4689 { .hw_value
= 64, .center_freq
= 5320, .max_power
= 25 },
4690 { .hw_value
= 100, .center_freq
= 5500, .max_power
= 25 },
4691 { .hw_value
= 104, .center_freq
= 5520, .max_power
= 25 },
4692 { .hw_value
= 108, .center_freq
= 5540, .max_power
= 25 },
4693 { .hw_value
= 112, .center_freq
= 5560, .max_power
= 25 },
4694 { .hw_value
= 116, .center_freq
= 5580, .max_power
= 25 },
4695 { .hw_value
= 120, .center_freq
= 5600, .max_power
= 25 },
4696 { .hw_value
= 124, .center_freq
= 5620, .max_power
= 25 },
4697 { .hw_value
= 128, .center_freq
= 5640, .max_power
= 25 },
4698 { .hw_value
= 132, .center_freq
= 5660, .max_power
= 25 },
4699 { .hw_value
= 136, .center_freq
= 5680, .max_power
= 25 },
4700 { .hw_value
= 140, .center_freq
= 5700, .max_power
= 25 },
4701 { .hw_value
= 149, .center_freq
= 5745, .max_power
= 25 },
4702 { .hw_value
= 153, .center_freq
= 5765, .max_power
= 25 },
4703 { .hw_value
= 157, .center_freq
= 5785, .max_power
= 25 },
4704 { .hw_value
= 161, .center_freq
= 5805, .max_power
= 25 },
4705 { .hw_value
= 165, .center_freq
= 5825, .max_power
= 25 },
4708 static struct ieee80211_supported_band wl1271_band_5ghz
= {
4709 .channels
= wl1271_channels_5ghz
,
4710 .n_channels
= ARRAY_SIZE(wl1271_channels_5ghz
),
4711 .bitrates
= wl1271_rates_5ghz
,
4712 .n_bitrates
= ARRAY_SIZE(wl1271_rates_5ghz
),
4715 static const struct ieee80211_ops wl1271_ops
= {
4716 .start
= wl1271_op_start
,
4717 .stop
= wl1271_op_stop
,
4718 .add_interface
= wl1271_op_add_interface
,
4719 .remove_interface
= wl1271_op_remove_interface
,
4720 .change_interface
= wl12xx_op_change_interface
,
4722 .suspend
= wl1271_op_suspend
,
4723 .resume
= wl1271_op_resume
,
4725 .config
= wl1271_op_config
,
4726 .prepare_multicast
= wl1271_op_prepare_multicast
,
4727 .configure_filter
= wl1271_op_configure_filter
,
4729 .set_key
= wlcore_op_set_key
,
4730 .hw_scan
= wl1271_op_hw_scan
,
4731 .cancel_hw_scan
= wl1271_op_cancel_hw_scan
,
4732 .sched_scan_start
= wl1271_op_sched_scan_start
,
4733 .sched_scan_stop
= wl1271_op_sched_scan_stop
,
4734 .bss_info_changed
= wl1271_op_bss_info_changed
,
4735 .set_frag_threshold
= wl1271_op_set_frag_threshold
,
4736 .set_rts_threshold
= wl1271_op_set_rts_threshold
,
4737 .conf_tx
= wl1271_op_conf_tx
,
4738 .get_tsf
= wl1271_op_get_tsf
,
4739 .get_survey
= wl1271_op_get_survey
,
4740 .sta_state
= wl12xx_op_sta_state
,
4741 .ampdu_action
= wl1271_op_ampdu_action
,
4742 .tx_frames_pending
= wl1271_tx_frames_pending
,
4743 .set_bitrate_mask
= wl12xx_set_bitrate_mask
,
4744 .channel_switch
= wl12xx_op_channel_switch
,
4745 CFG80211_TESTMODE_CMD(wl1271_tm_cmd
)
4749 u8
wlcore_rate_to_idx(struct wl1271
*wl
, u8 rate
, enum ieee80211_band band
)
4755 if (unlikely(rate
>= wl
->hw_tx_rate_tbl_size
)) {
4756 wl1271_error("Illegal RX rate from HW: %d", rate
);
4760 idx
= wl
->band_rate_to_idx
[band
][rate
];
4761 if (unlikely(idx
== CONF_HW_RXTX_RATE_UNSUPPORTED
)) {
4762 wl1271_error("Unsupported RX rate from HW: %d", rate
);
4769 static ssize_t
wl1271_sysfs_show_bt_coex_state(struct device
*dev
,
4770 struct device_attribute
*attr
,
4773 struct wl1271
*wl
= dev_get_drvdata(dev
);
4778 mutex_lock(&wl
->mutex
);
4779 len
= snprintf(buf
, len
, "%d\n\n0 - off\n1 - on\n",
4781 mutex_unlock(&wl
->mutex
);
4787 static ssize_t
wl1271_sysfs_store_bt_coex_state(struct device
*dev
,
4788 struct device_attribute
*attr
,
4789 const char *buf
, size_t count
)
4791 struct wl1271
*wl
= dev_get_drvdata(dev
);
4795 ret
= kstrtoul(buf
, 10, &res
);
4797 wl1271_warning("incorrect value written to bt_coex_mode");
4801 mutex_lock(&wl
->mutex
);
4805 if (res
== wl
->sg_enabled
)
4808 wl
->sg_enabled
= res
;
4810 if (wl
->state
== WL1271_STATE_OFF
)
4813 ret
= wl1271_ps_elp_wakeup(wl
);
4817 wl1271_acx_sg_enable(wl
, wl
->sg_enabled
);
4818 wl1271_ps_elp_sleep(wl
);
4821 mutex_unlock(&wl
->mutex
);
4825 static DEVICE_ATTR(bt_coex_state
, S_IRUGO
| S_IWUSR
,
4826 wl1271_sysfs_show_bt_coex_state
,
4827 wl1271_sysfs_store_bt_coex_state
);
4829 static ssize_t
wl1271_sysfs_show_hw_pg_ver(struct device
*dev
,
4830 struct device_attribute
*attr
,
4833 struct wl1271
*wl
= dev_get_drvdata(dev
);
4838 mutex_lock(&wl
->mutex
);
4839 if (wl
->hw_pg_ver
>= 0)
4840 len
= snprintf(buf
, len
, "%d\n", wl
->hw_pg_ver
);
4842 len
= snprintf(buf
, len
, "n/a\n");
4843 mutex_unlock(&wl
->mutex
);
4848 static DEVICE_ATTR(hw_pg_ver
, S_IRUGO
,
4849 wl1271_sysfs_show_hw_pg_ver
, NULL
);
4851 static ssize_t
wl1271_sysfs_read_fwlog(struct file
*filp
, struct kobject
*kobj
,
4852 struct bin_attribute
*bin_attr
,
4853 char *buffer
, loff_t pos
, size_t count
)
4855 struct device
*dev
= container_of(kobj
, struct device
, kobj
);
4856 struct wl1271
*wl
= dev_get_drvdata(dev
);
4860 ret
= mutex_lock_interruptible(&wl
->mutex
);
4862 return -ERESTARTSYS
;
4864 /* Let only one thread read the log at a time, blocking others */
4865 while (wl
->fwlog_size
== 0) {
4868 prepare_to_wait_exclusive(&wl
->fwlog_waitq
,
4870 TASK_INTERRUPTIBLE
);
4872 if (wl
->fwlog_size
!= 0) {
4873 finish_wait(&wl
->fwlog_waitq
, &wait
);
4877 mutex_unlock(&wl
->mutex
);
4880 finish_wait(&wl
->fwlog_waitq
, &wait
);
4882 if (signal_pending(current
))
4883 return -ERESTARTSYS
;
4885 ret
= mutex_lock_interruptible(&wl
->mutex
);
4887 return -ERESTARTSYS
;
4890 /* Check if the fwlog is still valid */
4891 if (wl
->fwlog_size
< 0) {
4892 mutex_unlock(&wl
->mutex
);
4896 /* Seeking is not supported - old logs are not kept. Disregard pos. */
4897 len
= min(count
, (size_t)wl
->fwlog_size
);
4898 wl
->fwlog_size
-= len
;
4899 memcpy(buffer
, wl
->fwlog
, len
);
4901 /* Make room for new messages */
4902 memmove(wl
->fwlog
, wl
->fwlog
+ len
, wl
->fwlog_size
);
4904 mutex_unlock(&wl
->mutex
);
4909 static struct bin_attribute fwlog_attr
= {
4910 .attr
= {.name
= "fwlog", .mode
= S_IRUSR
},
4911 .read
= wl1271_sysfs_read_fwlog
,
4914 static void wl1271_connection_loss_work(struct work_struct
*work
)
4916 struct delayed_work
*dwork
;
4918 struct ieee80211_vif
*vif
;
4919 struct wl12xx_vif
*wlvif
;
4921 dwork
= container_of(work
, struct delayed_work
, work
);
4922 wl
= container_of(dwork
, struct wl1271
, connection_loss_work
);
4924 wl1271_info("Connection loss work.");
4926 mutex_lock(&wl
->mutex
);
4928 if (unlikely(wl
->state
== WL1271_STATE_OFF
))
4931 /* Call mac80211 connection loss */
4932 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
4933 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
4935 vif
= wl12xx_wlvif_to_vif(wlvif
);
4936 ieee80211_connection_loss(vif
);
4939 mutex_unlock(&wl
->mutex
);
4942 static void wl12xx_derive_mac_addresses(struct wl1271
*wl
,
4943 u32 oui
, u32 nic
, int n
)
4947 wl1271_debug(DEBUG_PROBE
, "base address: oui %06x nic %06x, n %d",
4950 if (nic
+ n
- 1 > 0xffffff)
4951 wl1271_warning("NIC part of the MAC address wraps around!");
4953 for (i
= 0; i
< n
; i
++) {
4954 wl
->addresses
[i
].addr
[0] = (u8
)(oui
>> 16);
4955 wl
->addresses
[i
].addr
[1] = (u8
)(oui
>> 8);
4956 wl
->addresses
[i
].addr
[2] = (u8
) oui
;
4957 wl
->addresses
[i
].addr
[3] = (u8
)(nic
>> 16);
4958 wl
->addresses
[i
].addr
[4] = (u8
)(nic
>> 8);
4959 wl
->addresses
[i
].addr
[5] = (u8
) nic
;
4963 wl
->hw
->wiphy
->n_addresses
= n
;
4964 wl
->hw
->wiphy
->addresses
= wl
->addresses
;
4967 static int wl12xx_get_hw_info(struct wl1271
*wl
)
4971 ret
= wl12xx_set_power_on(wl
);
4975 wl
->chip
.id
= wlcore_read_reg(wl
, REG_CHIP_ID_B
);
4977 wl
->fuse_oui_addr
= 0;
4978 wl
->fuse_nic_addr
= 0;
4980 wl
->hw_pg_ver
= wl
->ops
->get_pg_ver(wl
);
4982 if (wl
->ops
->get_mac
)
4983 wl
->ops
->get_mac(wl
);
4985 wl1271_power_off(wl
);
4990 static int wl1271_register_hw(struct wl1271
*wl
)
4993 u32 oui_addr
= 0, nic_addr
= 0;
4995 if (wl
->mac80211_registered
)
4998 wl1271_fetch_nvs(wl
);
4999 if (wl
->nvs
!= NULL
) {
5000 /* NOTE: The wl->nvs->nvs element must be first, in
5001 * order to simplify the casting, we assume it is at
5002 * the beginning of the wl->nvs structure.
5004 u8
*nvs_ptr
= (u8
*)wl
->nvs
;
5007 (nvs_ptr
[11] << 16) + (nvs_ptr
[10] << 8) + nvs_ptr
[6];
5009 (nvs_ptr
[5] << 16) + (nvs_ptr
[4] << 8) + nvs_ptr
[3];
5012 /* if the MAC address is zeroed in the NVS derive from fuse */
5013 if (oui_addr
== 0 && nic_addr
== 0) {
5014 oui_addr
= wl
->fuse_oui_addr
;
5015 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
5016 nic_addr
= wl
->fuse_nic_addr
+ 1;
5019 wl12xx_derive_mac_addresses(wl
, oui_addr
, nic_addr
, 2);
5021 ret
= ieee80211_register_hw(wl
->hw
);
5023 wl1271_error("unable to register mac80211 hw: %d", ret
);
5027 wl
->mac80211_registered
= true;
5029 wl1271_debugfs_init(wl
);
5031 wl1271_notice("loaded");
5037 static void wl1271_unregister_hw(struct wl1271
*wl
)
5040 wl1271_plt_stop(wl
);
5042 ieee80211_unregister_hw(wl
->hw
);
5043 wl
->mac80211_registered
= false;
5047 static const struct ieee80211_iface_limit wlcore_iface_limits
[] = {
5050 .types
= BIT(NL80211_IFTYPE_STATION
),
5054 .types
= BIT(NL80211_IFTYPE_AP
) |
5055 BIT(NL80211_IFTYPE_P2P_GO
) |
5056 BIT(NL80211_IFTYPE_P2P_CLIENT
),
5060 static const struct ieee80211_iface_combination
5061 wlcore_iface_combinations
[] = {
5063 .num_different_channels
= 1,
5064 .max_interfaces
= 2,
5065 .limits
= wlcore_iface_limits
,
5066 .n_limits
= ARRAY_SIZE(wlcore_iface_limits
),
5070 static int wl1271_init_ieee80211(struct wl1271
*wl
)
5072 static const u32 cipher_suites
[] = {
5073 WLAN_CIPHER_SUITE_WEP40
,
5074 WLAN_CIPHER_SUITE_WEP104
,
5075 WLAN_CIPHER_SUITE_TKIP
,
5076 WLAN_CIPHER_SUITE_CCMP
,
5077 WL1271_CIPHER_SUITE_GEM
,
5080 /* The tx descriptor buffer */
5081 wl
->hw
->extra_tx_headroom
= sizeof(struct wl1271_tx_hw_descr
);
5083 if (wl
->quirks
& WLCORE_QUIRK_TKIP_HEADER_SPACE
)
5084 wl
->hw
->extra_tx_headroom
+= WL1271_EXTRA_SPACE_TKIP
;
5087 /* FIXME: find a proper value */
5088 wl
->hw
->channel_change_time
= 10000;
5089 wl
->hw
->max_listen_interval
= wl
->conf
.conn
.max_listen_interval
;
5091 wl
->hw
->flags
= IEEE80211_HW_SIGNAL_DBM
|
5092 IEEE80211_HW_SUPPORTS_PS
|
5093 IEEE80211_HW_SUPPORTS_DYNAMIC_PS
|
5094 IEEE80211_HW_SUPPORTS_UAPSD
|
5095 IEEE80211_HW_HAS_RATE_CONTROL
|
5096 IEEE80211_HW_CONNECTION_MONITOR
|
5097 IEEE80211_HW_REPORTS_TX_ACK_STATUS
|
5098 IEEE80211_HW_SPECTRUM_MGMT
|
5099 IEEE80211_HW_AP_LINK_PS
|
5100 IEEE80211_HW_AMPDU_AGGREGATION
|
5101 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW
|
5102 IEEE80211_HW_SCAN_WHILE_IDLE
;
5104 wl
->hw
->wiphy
->cipher_suites
= cipher_suites
;
5105 wl
->hw
->wiphy
->n_cipher_suites
= ARRAY_SIZE(cipher_suites
);
5107 wl
->hw
->wiphy
->interface_modes
= BIT(NL80211_IFTYPE_STATION
) |
5108 BIT(NL80211_IFTYPE_ADHOC
) | BIT(NL80211_IFTYPE_AP
) |
5109 BIT(NL80211_IFTYPE_P2P_CLIENT
) | BIT(NL80211_IFTYPE_P2P_GO
);
5110 wl
->hw
->wiphy
->max_scan_ssids
= 1;
5111 wl
->hw
->wiphy
->max_sched_scan_ssids
= 16;
5112 wl
->hw
->wiphy
->max_match_sets
= 16;
5114 * Maximum length of elements in scanning probe request templates
5115 * should be the maximum length possible for a template, without
5116 * the IEEE80211 header of the template
5118 wl
->hw
->wiphy
->max_scan_ie_len
= WL1271_CMD_TEMPL_MAX_SIZE
-
5119 sizeof(struct ieee80211_header
);
5121 wl
->hw
->wiphy
->max_sched_scan_ie_len
= WL1271_CMD_TEMPL_MAX_SIZE
-
5122 sizeof(struct ieee80211_header
);
5124 wl
->hw
->wiphy
->flags
|= WIPHY_FLAG_AP_UAPSD
|
5125 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL
;
5127 /* make sure all our channels fit in the scanned_ch bitmask */
5128 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels
) +
5129 ARRAY_SIZE(wl1271_channels_5ghz
) >
5130 WL1271_MAX_CHANNELS
);
5132 * We keep local copies of the band structs because we need to
5133 * modify them on a per-device basis.
5135 memcpy(&wl
->bands
[IEEE80211_BAND_2GHZ
], &wl1271_band_2ghz
,
5136 sizeof(wl1271_band_2ghz
));
5137 memcpy(&wl
->bands
[IEEE80211_BAND_2GHZ
].ht_cap
,
5138 &wl
->ht_cap
[IEEE80211_BAND_2GHZ
],
5139 sizeof(*wl
->ht_cap
));
5140 memcpy(&wl
->bands
[IEEE80211_BAND_5GHZ
], &wl1271_band_5ghz
,
5141 sizeof(wl1271_band_5ghz
));
5142 memcpy(&wl
->bands
[IEEE80211_BAND_5GHZ
].ht_cap
,
5143 &wl
->ht_cap
[IEEE80211_BAND_5GHZ
],
5144 sizeof(*wl
->ht_cap
));
5146 wl
->hw
->wiphy
->bands
[IEEE80211_BAND_2GHZ
] =
5147 &wl
->bands
[IEEE80211_BAND_2GHZ
];
5148 wl
->hw
->wiphy
->bands
[IEEE80211_BAND_5GHZ
] =
5149 &wl
->bands
[IEEE80211_BAND_5GHZ
];
5152 wl
->hw
->max_rates
= 1;
5154 wl
->hw
->wiphy
->reg_notifier
= wl1271_reg_notify
;
5156 /* the FW answers probe-requests in AP-mode */
5157 wl
->hw
->wiphy
->flags
|= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD
;
5158 wl
->hw
->wiphy
->probe_resp_offload
=
5159 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS
|
5160 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2
|
5161 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P
;
5163 /* allowed interface combinations */
5164 wl
->hw
->wiphy
->iface_combinations
= wlcore_iface_combinations
;
5165 wl
->hw
->wiphy
->n_iface_combinations
=
5166 ARRAY_SIZE(wlcore_iface_combinations
);
5168 SET_IEEE80211_DEV(wl
->hw
, wl
->dev
);
5170 wl
->hw
->sta_data_size
= sizeof(struct wl1271_station
);
5171 wl
->hw
->vif_data_size
= sizeof(struct wl12xx_vif
);
5173 wl
->hw
->max_rx_aggregation_subframes
= wl
->conf
.ht
.rx_ba_win_size
;
5178 #define WL1271_DEFAULT_CHANNEL 0
5180 struct ieee80211_hw
*wlcore_alloc_hw(size_t priv_size
)
5182 struct ieee80211_hw
*hw
;
5187 BUILD_BUG_ON(AP_MAX_STATIONS
> WL12XX_MAX_LINKS
);
5189 hw
= ieee80211_alloc_hw(sizeof(*wl
), &wl1271_ops
);
5191 wl1271_error("could not alloc ieee80211_hw");
5197 memset(wl
, 0, sizeof(*wl
));
5199 wl
->priv
= kzalloc(priv_size
, GFP_KERNEL
);
5201 wl1271_error("could not alloc wl priv");
5203 goto err_priv_alloc
;
5206 INIT_LIST_HEAD(&wl
->wlvif_list
);
5210 for (i
= 0; i
< NUM_TX_QUEUES
; i
++)
5211 for (j
= 0; j
< WL12XX_MAX_LINKS
; j
++)
5212 skb_queue_head_init(&wl
->links
[j
].tx_queue
[i
]);
5214 skb_queue_head_init(&wl
->deferred_rx_queue
);
5215 skb_queue_head_init(&wl
->deferred_tx_queue
);
5217 INIT_DELAYED_WORK(&wl
->elp_work
, wl1271_elp_work
);
5218 INIT_WORK(&wl
->netstack_work
, wl1271_netstack_work
);
5219 INIT_WORK(&wl
->tx_work
, wl1271_tx_work
);
5220 INIT_WORK(&wl
->recovery_work
, wl1271_recovery_work
);
5221 INIT_DELAYED_WORK(&wl
->scan_complete_work
, wl1271_scan_complete_work
);
5222 INIT_DELAYED_WORK(&wl
->tx_watchdog_work
, wl12xx_tx_watchdog_work
);
5223 INIT_DELAYED_WORK(&wl
->connection_loss_work
,
5224 wl1271_connection_loss_work
);
5226 wl
->freezable_wq
= create_freezable_workqueue("wl12xx_wq");
5227 if (!wl
->freezable_wq
) {
5232 wl
->channel
= WL1271_DEFAULT_CHANNEL
;
5234 wl
->power_level
= WL1271_DEFAULT_POWER_LEVEL
;
5235 wl
->band
= IEEE80211_BAND_2GHZ
;
5236 wl
->channel_type
= NL80211_CHAN_NO_HT
;
5238 wl
->sg_enabled
= true;
5239 wl
->sleep_auth
= WL1271_PSM_ILLEGAL
;
5242 wl
->ap_fw_ps_map
= 0;
5244 wl
->platform_quirks
= 0;
5245 wl
->sched_scanning
= false;
5246 wl
->system_hlid
= WL12XX_SYSTEM_HLID
;
5247 wl
->active_sta_count
= 0;
5249 init_waitqueue_head(&wl
->fwlog_waitq
);
5251 /* The system link is always allocated */
5252 __set_bit(WL12XX_SYSTEM_HLID
, wl
->links_map
);
5254 memset(wl
->tx_frames_map
, 0, sizeof(wl
->tx_frames_map
));
5255 for (i
= 0; i
< wl
->num_tx_desc
; i
++)
5256 wl
->tx_frames
[i
] = NULL
;
5258 spin_lock_init(&wl
->wl_lock
);
5260 wl
->state
= WL1271_STATE_OFF
;
5261 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
5262 mutex_init(&wl
->mutex
);
5263 mutex_init(&wl
->flush_mutex
);
5265 order
= get_order(WL1271_AGGR_BUFFER_SIZE
);
5266 wl
->aggr_buf
= (u8
*)__get_free_pages(GFP_KERNEL
, order
);
5267 if (!wl
->aggr_buf
) {
5272 wl
->dummy_packet
= wl12xx_alloc_dummy_packet(wl
);
5273 if (!wl
->dummy_packet
) {
5278 /* Allocate one page for the FW log */
5279 wl
->fwlog
= (u8
*)get_zeroed_page(GFP_KERNEL
);
5282 goto err_dummy_packet
;
5285 wl
->mbox
= kmalloc(sizeof(*wl
->mbox
), GFP_KERNEL
| GFP_DMA
);
5294 free_page((unsigned long)wl
->fwlog
);
5297 dev_kfree_skb(wl
->dummy_packet
);
5300 free_pages((unsigned long)wl
->aggr_buf
, order
);
5303 destroy_workqueue(wl
->freezable_wq
);
5306 wl1271_debugfs_exit(wl
);
5310 ieee80211_free_hw(hw
);
5314 return ERR_PTR(ret
);
5316 EXPORT_SYMBOL_GPL(wlcore_alloc_hw
);
5318 int wlcore_free_hw(struct wl1271
*wl
)
5320 /* Unblock any fwlog readers */
5321 mutex_lock(&wl
->mutex
);
5322 wl
->fwlog_size
= -1;
5323 wake_up_interruptible_all(&wl
->fwlog_waitq
);
5324 mutex_unlock(&wl
->mutex
);
5326 device_remove_bin_file(wl
->dev
, &fwlog_attr
);
5328 device_remove_file(wl
->dev
, &dev_attr_hw_pg_ver
);
5330 device_remove_file(wl
->dev
, &dev_attr_bt_coex_state
);
5331 free_page((unsigned long)wl
->fwlog
);
5332 dev_kfree_skb(wl
->dummy_packet
);
5333 free_pages((unsigned long)wl
->aggr_buf
,
5334 get_order(WL1271_AGGR_BUFFER_SIZE
));
5336 wl1271_debugfs_exit(wl
);
5340 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
5344 kfree(wl
->fw_status_1
);
5345 kfree(wl
->tx_res_if
);
5346 destroy_workqueue(wl
->freezable_wq
);
5349 ieee80211_free_hw(wl
->hw
);
5353 EXPORT_SYMBOL_GPL(wlcore_free_hw
);
5355 static irqreturn_t
wl12xx_hardirq(int irq
, void *cookie
)
5357 struct wl1271
*wl
= cookie
;
5358 unsigned long flags
;
5360 wl1271_debug(DEBUG_IRQ
, "IRQ");
5362 /* complete the ELP completion */
5363 spin_lock_irqsave(&wl
->wl_lock
, flags
);
5364 set_bit(WL1271_FLAG_IRQ_RUNNING
, &wl
->flags
);
5365 if (wl
->elp_compl
) {
5366 complete(wl
->elp_compl
);
5367 wl
->elp_compl
= NULL
;
5370 if (test_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
)) {
5371 /* don't enqueue a work right now. mark it as pending */
5372 set_bit(WL1271_FLAG_PENDING_WORK
, &wl
->flags
);
5373 wl1271_debug(DEBUG_IRQ
, "should not enqueue work");
5374 disable_irq_nosync(wl
->irq
);
5375 pm_wakeup_event(wl
->dev
, 0);
5376 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
5379 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
5381 return IRQ_WAKE_THREAD
;
5384 int __devinit
wlcore_probe(struct wl1271
*wl
, struct platform_device
*pdev
)
5386 struct wl12xx_platform_data
*pdata
= pdev
->dev
.platform_data
;
5387 unsigned long irqflags
;
5390 if (!wl
->ops
|| !wl
->ptable
) {
5395 BUG_ON(wl
->num_tx_desc
> WLCORE_MAX_TX_DESCRIPTORS
);
5397 /* adjust some runtime configuration parameters */
5398 wlcore_adjust_conf(wl
);
5400 wl
->irq
= platform_get_irq(pdev
, 0);
5401 wl
->platform_quirks
= pdata
->platform_quirks
;
5402 wl
->set_power
= pdata
->set_power
;
5403 wl
->dev
= &pdev
->dev
;
5404 wl
->if_ops
= pdata
->ops
;
5406 platform_set_drvdata(pdev
, wl
);
5408 if (wl
->platform_quirks
& WL12XX_PLATFORM_QUIRK_EDGE_IRQ
)
5409 irqflags
= IRQF_TRIGGER_RISING
;
5411 irqflags
= IRQF_TRIGGER_HIGH
| IRQF_ONESHOT
;
5413 ret
= request_threaded_irq(wl
->irq
, wl12xx_hardirq
, wl1271_irq
,
5417 wl1271_error("request_irq() failed: %d", ret
);
5421 ret
= enable_irq_wake(wl
->irq
);
5423 wl
->irq_wake_enabled
= true;
5424 device_init_wakeup(wl
->dev
, 1);
5425 if (pdata
->pwr_in_suspend
) {
5426 wl
->hw
->wiphy
->wowlan
.flags
= WIPHY_WOWLAN_ANY
;
5427 wl
->hw
->wiphy
->wowlan
.n_patterns
=
5428 WL1271_MAX_RX_FILTERS
;
5429 wl
->hw
->wiphy
->wowlan
.pattern_min_len
= 1;
5430 wl
->hw
->wiphy
->wowlan
.pattern_max_len
=
5431 WL1271_RX_FILTER_MAX_PATTERN_SIZE
;
5434 disable_irq(wl
->irq
);
5436 ret
= wl12xx_get_hw_info(wl
);
5438 wl1271_error("couldn't get hw info");
5442 ret
= wl
->ops
->identify_chip(wl
);
5446 ret
= wl1271_init_ieee80211(wl
);
5450 ret
= wl1271_register_hw(wl
);
5454 /* Create sysfs file to control bt coex state */
5455 ret
= device_create_file(wl
->dev
, &dev_attr_bt_coex_state
);
5457 wl1271_error("failed to create sysfs file bt_coex_state");
5461 /* Create sysfs file to get HW PG version */
5462 ret
= device_create_file(wl
->dev
, &dev_attr_hw_pg_ver
);
5464 wl1271_error("failed to create sysfs file hw_pg_ver");
5465 goto out_bt_coex_state
;
5468 /* Create sysfs file for the FW log */
5469 ret
= device_create_bin_file(wl
->dev
, &fwlog_attr
);
5471 wl1271_error("failed to create sysfs file fwlog");
5478 device_remove_file(wl
->dev
, &dev_attr_hw_pg_ver
);
5481 device_remove_file(wl
->dev
, &dev_attr_bt_coex_state
);
5484 free_irq(wl
->irq
, wl
);
5492 EXPORT_SYMBOL_GPL(wlcore_probe
);
5494 int __devexit
wlcore_remove(struct platform_device
*pdev
)
5496 struct wl1271
*wl
= platform_get_drvdata(pdev
);
5498 if (wl
->irq_wake_enabled
) {
5499 device_init_wakeup(wl
->dev
, 0);
5500 disable_irq_wake(wl
->irq
);
5502 wl1271_unregister_hw(wl
);
5503 free_irq(wl
->irq
, wl
);
5508 EXPORT_SYMBOL_GPL(wlcore_remove
);
/*
 * Runtime debug verbosity bitmask (DEBUG_* flags); exported so the
 * lower drivers (wl12xx/wl18xx) share the same setting. Writable by
 * root at runtime through /sys/module/.../parameters/debug_level.
 */
u32 wl12xx_debug_level = DEBUG_NONE;
EXPORT_SYMBOL_GPL(wl12xx_debug_level);
module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug_level, "wl12xx debugging level");

/* FW logger mode, parsed from a string at load time (read-only: perm 0) */
module_param_named(fwlog, fwlog_param, charp, 0);
MODULE_PARM_DESC(fwlog,
		 "FW logger options: continuous, ondemand, dbgpins or disable");

/* Debug aid: crash the kernel instead of recovering the firmware */
module_param(bug_on_recovery, bool, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");

/* Debug aid: leave the firmware stuck so its state can be inspected */
module_param(no_recovery, bool, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");