/*
 * Copyright (c) 2010-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "htc.h"

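/* Map a WMI command ID to its printable name, used in the debug messages below. */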
static const char *wmi_cmd_to_name(enum wmi_cmd_id wmi_cmd)
{
	switch (wmi_cmd) {
	case WMI_ECHO_CMDID:
		return "WMI_ECHO_CMDID";
	case WMI_ACCESS_MEMORY_CMDID:
		return "WMI_ACCESS_MEMORY_CMDID";
	case WMI_GET_FW_VERSION:
		return "WMI_GET_FW_VERSION";
	case WMI_DISABLE_INTR_CMDID:
		return "WMI_DISABLE_INTR_CMDID";
	case WMI_ENABLE_INTR_CMDID:
		return "WMI_ENABLE_INTR_CMDID";
	case WMI_ATH_INIT_CMDID:
		return "WMI_ATH_INIT_CMDID";
	case WMI_ABORT_TXQ_CMDID:
		return "WMI_ABORT_TXQ_CMDID";
	case WMI_STOP_TX_DMA_CMDID:
		return "WMI_STOP_TX_DMA_CMDID";
	case WMI_ABORT_TX_DMA_CMDID:
		return "WMI_ABORT_TX_DMA_CMDID";
	case WMI_DRAIN_TXQ_CMDID:
		return "WMI_DRAIN_TXQ_CMDID";
	case WMI_DRAIN_TXQ_ALL_CMDID:
		return "WMI_DRAIN_TXQ_ALL_CMDID";
	case WMI_START_RECV_CMDID:
		return "WMI_START_RECV_CMDID";
	case WMI_STOP_RECV_CMDID:
		return "WMI_STOP_RECV_CMDID";
	case WMI_FLUSH_RECV_CMDID:
		return "WMI_FLUSH_RECV_CMDID";
	case WMI_SET_MODE_CMDID:
		return "WMI_SET_MODE_CMDID";
	case WMI_NODE_CREATE_CMDID:
		return "WMI_NODE_CREATE_CMDID";
	case WMI_NODE_REMOVE_CMDID:
		return "WMI_NODE_REMOVE_CMDID";
	case WMI_VAP_REMOVE_CMDID:
		return "WMI_VAP_REMOVE_CMDID";
	case WMI_VAP_CREATE_CMDID:
		return "WMI_VAP_CREATE_CMDID";
	case WMI_REG_READ_CMDID:
		return "WMI_REG_READ_CMDID";
	case WMI_REG_WRITE_CMDID:
		return "WMI_REG_WRITE_CMDID";
	case WMI_REG_RMW_CMDID:
		return "WMI_REG_RMW_CMDID";
	case WMI_RC_STATE_CHANGE_CMDID:
		return "WMI_RC_STATE_CHANGE_CMDID";
	case WMI_RC_RATE_UPDATE_CMDID:
		return "WMI_RC_RATE_UPDATE_CMDID";
	case WMI_TARGET_IC_UPDATE_CMDID:
		return "WMI_TARGET_IC_UPDATE_CMDID";
	case WMI_TX_AGGR_ENABLE_CMDID:
		return "WMI_TX_AGGR_ENABLE_CMDID";
	case WMI_TGT_DETACH_CMDID:
		return "WMI_TGT_DETACH_CMDID";
	case WMI_NODE_UPDATE_CMDID:
		return "WMI_NODE_UPDATE_CMDID";
	case WMI_INT_STATS_CMDID:
		return "WMI_INT_STATS_CMDID";
	case WMI_TX_STATS_CMDID:
		return "WMI_TX_STATS_CMDID";
	case WMI_RX_STATS_CMDID:
		return "WMI_RX_STATS_CMDID";
	case WMI_BITRATE_MASK_CMDID:
		return "WMI_BITRATE_MASK_CMDID";
	}

	return "Bogus";
}

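/*
 * Allocate the WMI context and initialize its locks, mutexes, event
 * queue and event-processing tasklet.
 */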
struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv)
{
	struct wmi *wmi;

	wmi = kzalloc(sizeof(struct wmi), GFP_KERNEL);
	if (!wmi)
		return NULL;

	wmi->drv_priv = priv;
	wmi->stopped = false;
	skb_queue_head_init(&wmi->wmi_event_queue);
	spin_lock_init(&wmi->wmi_lock);
	spin_lock_init(&wmi->event_lock);
	mutex_init(&wmi->op_mutex);
	mutex_init(&wmi->multi_write_mutex);
	mutex_init(&wmi->multi_rmw_mutex);
	init_completion(&wmi->cmd_wait);
	INIT_LIST_HEAD(&wmi->pending_tx_events);
	tasklet_init(&wmi->wmi_event_tasklet, ath9k_wmi_event_tasklet,
		     (unsigned long)wmi);

	return wmi;
}

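/* Mark WMI as stopped (under op_mutex) and free the context. */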
void ath9k_deinit_wmi(struct ath9k_htc_priv *priv)
{
	struct wmi *wmi = priv->wmi;

	mutex_lock(&wmi->op_mutex);
	wmi->stopped = true;
	mutex_unlock(&wmi->op_mutex);

	kfree(priv->wmi);
}

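/* Stop event processing: kill the tasklet and drop any queued event frames. */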
void ath9k_wmi_event_drain(struct ath9k_htc_priv *priv)
{
	unsigned long flags;

	tasklet_kill(&priv->wmi->wmi_event_tasklet);
	spin_lock_irqsave(&priv->wmi->wmi_lock, flags);
	__skb_queue_purge(&priv->wmi->wmi_event_queue);
	spin_unlock_irqrestore(&priv->wmi->wmi_lock, flags);
}

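/*
 * Tasklet that drains the WMI event queue and dispatches beacon (SWBA),
 * fatal and TX-status events to the driver.
 */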
void ath9k_wmi_event_tasklet(unsigned long data)
{
	struct wmi *wmi = (struct wmi *)data;
	struct ath9k_htc_priv *priv = wmi->drv_priv;
	struct wmi_cmd_hdr *hdr;
	void *wmi_event;
	struct wmi_event_swba *swba;
	struct sk_buff *skb = NULL;
	unsigned long flags;
	u16 cmd_id;

	do {
		spin_lock_irqsave(&wmi->wmi_lock, flags);
		skb = __skb_dequeue(&wmi->wmi_event_queue);
		if (!skb) {
			spin_unlock_irqrestore(&wmi->wmi_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&wmi->wmi_lock, flags);

		hdr = (struct wmi_cmd_hdr *) skb->data;
		cmd_id = be16_to_cpu(hdr->command_id);
		wmi_event = skb_pull(skb, sizeof(struct wmi_cmd_hdr));

		switch (cmd_id) {
		case WMI_SWBA_EVENTID:
			swba = (struct wmi_event_swba *) wmi_event;
			ath9k_htc_swba(priv, swba);
			break;
		case WMI_FATAL_EVENTID:
			ieee80211_queue_work(wmi->drv_priv->hw,
					     &wmi->drv_priv->fatal_work);
			break;
		case WMI_TXSTATUS_EVENTID:
			spin_lock_bh(&priv->tx.tx_lock);
			if (priv->tx.flags & ATH9K_HTC_OP_TX_DRAIN) {
				spin_unlock_bh(&priv->tx.tx_lock);
				break;
			}
			spin_unlock_bh(&priv->tx.tx_lock);

			ath9k_htc_txstatus(priv, wmi_event);
			break;
		default:
			break;
		}

		kfree_skb(skb);
	} while (1);
}

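/* Scheduled on a WMI_FATAL_EVENTID event; resets the device. */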
void ath9k_fatal_work(struct work_struct *work)
{
	struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv,
						   fatal_work);
	struct ath_common *common = ath9k_hw_common(priv->ah);

	ath_dbg(common, FATAL, "FATAL Event received, resetting device\n");
	ath9k_htc_reset(priv);
}

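/*
 * Copy a command response into the buffer registered by ath9k_wmi_cmd()
 * and wake up the waiter.
 */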
static void ath9k_wmi_rsp_callback(struct wmi *wmi, struct sk_buff *skb)
{
	skb_pull(skb, sizeof(struct wmi_cmd_hdr));

	if (wmi->cmd_rsp_buf != NULL && wmi->cmd_rsp_len != 0)
		memcpy(wmi->cmd_rsp_buf, skb->data, wmi->cmd_rsp_len);

	complete(&wmi->cmd_wait);
}

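/*
 * RX callback for the WMI control endpoint. Frames with the event bit
 * (0x1000) set in the command ID are queued for the event tasklet;
 * everything else is treated as a command response and completes the
 * pending command if its sequence number still matches.
 */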
static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
			      enum htc_endpoint_id epid)
{
	struct wmi *wmi = (struct wmi *) priv;
	struct wmi_cmd_hdr *hdr;
	u16 cmd_id;

	if (unlikely(wmi->stopped))
		goto free_skb;

	hdr = (struct wmi_cmd_hdr *) skb->data;
	cmd_id = be16_to_cpu(hdr->command_id);

	if (cmd_id & 0x1000) {
		spin_lock(&wmi->wmi_lock);
		__skb_queue_tail(&wmi->wmi_event_queue, skb);
		spin_unlock(&wmi->wmi_lock);
		tasklet_schedule(&wmi->wmi_event_tasklet);
		return;
	}

	/* Check if there has been a timeout. */
	spin_lock(&wmi->wmi_lock);
	if (be16_to_cpu(hdr->seq_no) != wmi->last_seq_id) {
		spin_unlock(&wmi->wmi_lock);
		goto free_skb;
	}
	spin_unlock(&wmi->wmi_lock);

	/* WMI command response */
	ath9k_wmi_rsp_callback(wmi, skb);

free_skb:
	kfree_skb(skb);
}

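/* TX-complete callback for the control endpoint; just free the skb. */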
static void ath9k_wmi_ctrl_tx(void *priv, struct sk_buff *skb,
			      enum htc_endpoint_id epid, bool txok)
{
	kfree_skb(skb);
}

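/* Connect the WMI control service over HTC and report its endpoint ID. */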
int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi,
		      enum htc_endpoint_id *wmi_ctrl_epid)
{
	struct htc_service_connreq connect;
	int ret;

	wmi->htc = htc;

	memset(&connect, 0, sizeof(connect));

	connect.ep_callbacks.priv = wmi;
	connect.ep_callbacks.tx = ath9k_wmi_ctrl_tx;
	connect.ep_callbacks.rx = ath9k_wmi_ctrl_rx;
	connect.service_id = WMI_CONTROL_SVC;

	ret = htc_connect_service(htc, &connect, &wmi->ctrl_epid);
	if (ret)
		return ret;

	*wmi_ctrl_epid = wmi->ctrl_epid;

	return 0;
}

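/*
 * Prepend the WMI command header (command ID and sequence number) and
 * hand the frame to HTC on the control endpoint.
 */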
static int ath9k_wmi_cmd_issue(struct wmi *wmi,
			       struct sk_buff *skb,
			       enum wmi_cmd_id cmd, u16 len)
{
	struct wmi_cmd_hdr *hdr;
	unsigned long flags;

	hdr = (struct wmi_cmd_hdr *) skb_push(skb, sizeof(struct wmi_cmd_hdr));
	hdr->command_id = cpu_to_be16(cmd);
	hdr->seq_no = cpu_to_be16(++wmi->tx_seq_id);

	spin_lock_irqsave(&wmi->wmi_lock, flags);
	wmi->last_seq_id = wmi->tx_seq_id;
	spin_unlock_irqrestore(&wmi->wmi_lock, flags);

	return htc_send_epid(wmi->htc, skb, wmi->ctrl_epid);
}

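/*
 * Send a WMI command, serialized on op_mutex, and wait up to 'timeout'
 * jiffies for the target's response.
 */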
int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
		  u8 *cmd_buf, u32 cmd_len,
		  u8 *rsp_buf, u32 rsp_len,
		  u32 timeout)
{
	struct ath_hw *ah = wmi->drv_priv->ah;
	struct ath_common *common = ath9k_hw_common(ah);
	u16 headroom = sizeof(struct htc_frame_hdr) +
		       sizeof(struct wmi_cmd_hdr);
	struct sk_buff *skb;
	u8 *data;
	unsigned long time_left;
	int ret = 0;

	if (ah->ah_flags & AH_UNPLUGGED)
		return 0;

	skb = alloc_skb(headroom + cmd_len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, headroom);

	if (cmd_len != 0 && cmd_buf != NULL) {
		data = (u8 *) skb_put(skb, cmd_len);
		memcpy(data, cmd_buf, cmd_len);
	}

	mutex_lock(&wmi->op_mutex);

	/* check if wmi stopped flag is set */
	if (unlikely(wmi->stopped)) {
		ret = -EPROTO;
		goto out;
	}

	/* record the rsp buffer and length */
	wmi->cmd_rsp_buf = rsp_buf;
	wmi->cmd_rsp_len = rsp_len;

	ret = ath9k_wmi_cmd_issue(wmi, skb, cmd_id, cmd_len);
	if (ret)
		goto out;

	time_left = wait_for_completion_timeout(&wmi->cmd_wait, timeout);
	if (!time_left) {
		ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n",
			wmi_cmd_to_name(cmd_id));
		mutex_unlock(&wmi->op_mutex);
		return -ETIMEDOUT;
	}

	mutex_unlock(&wmi->op_mutex);

	return 0;

out:
	ath_dbg(common, WMI, "WMI failure for: %s\n", wmi_cmd_to_name(cmd_id));
	mutex_unlock(&wmi->op_mutex);
	kfree_skb(skb);

	return ret;
}