ath10k: implement prb tmpl wmi command
drivers/net/wireless/ath/ath10k/wmi-ops.h
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _WMI_OPS_H_
#define _WMI_OPS_H_

struct ath10k;
struct sk_buff;

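/* Per-backend WMI operations. Each firmware interface variant supplies its
 * own implementation: the pull_* ops parse received WMI events into the
 * corresponding *_ev_arg structures, while the gen_* ops build command
 * buffers that the inline wrappers below hand to ath10k_wmi_cmd_send().
 */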
struct wmi_ops {
        void (*rx)(struct ath10k *ar, struct sk_buff *skb);
        void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);

        int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_scan_ev_arg *arg);
        int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_mgmt_rx_ev_arg *arg);
        int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_ch_info_ev_arg *arg);
        int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_vdev_start_ev_arg *arg);
        int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_peer_kick_ev_arg *arg);
        int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_swba_ev_arg *arg);
        int (*pull_phyerr)(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_phyerr_ev_arg *arg);
        int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_svc_rdy_ev_arg *arg);
        int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_rdy_ev_arg *arg);
        int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
                struct ath10k_fw_stats *stats);

        struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
        struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
        struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
                u16 rd5g, u16 ctl2g, u16 ctl5g,
                enum wmi_dfs_region dfs_reg);
        struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
                u32 value);
        struct sk_buff *(*gen_init)(struct ath10k *ar);
        struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
                const struct wmi_start_scan_arg *arg);
        struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
                const struct wmi_stop_scan_arg *arg);
        struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
                enum wmi_vdev_type type,
                enum wmi_vdev_subtype subtype,
                const u8 macaddr[ETH_ALEN]);
        struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
        struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
                const struct wmi_vdev_start_request_arg *arg,
                bool restart);
        struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
        struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
                const u8 *bssid);
        struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
        struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
                u32 param_id, u32 param_value);
        struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
                const struct wmi_vdev_install_key_arg *arg);
        struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
                const struct wmi_vdev_spectral_conf_arg *arg);
        struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
                u32 trigger, u32 enable);
        struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
                const u8 peer_addr[ETH_ALEN]);
        struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
                const u8 peer_addr[ETH_ALEN]);
        struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
                const u8 peer_addr[ETH_ALEN],
                u32 tid_bitmap);
        struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
                const u8 *peer_addr,
                enum wmi_peer_param param_id,
                u32 param_value);
        struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
                const struct wmi_peer_assoc_complete_arg *arg);
        struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
                enum wmi_sta_ps_mode psmode);
        struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
                enum wmi_sta_powersave_param param_id,
                u32 value);
        struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
                const u8 *mac,
                enum wmi_ap_ps_peer_param param_id,
                u32 value);
        struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
                const struct wmi_scan_chan_list_arg *arg);
        struct sk_buff *(*gen_beacon_dma)(struct ath10k_vif *arvif);
        struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
                const struct wmi_pdev_set_wmm_params_arg *arg);
        struct sk_buff *(*gen_request_stats)(struct ath10k *ar,
                enum wmi_stats_id stats_id);
        struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
                enum wmi_force_fw_hang_type type,
                u32 delay_ms);
        struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
        struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable);
        struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
        struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
        struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
                u32 period, u32 duration,
                u32 next_offset,
                u32 enabled);
        struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
        struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
                const u8 *mac);
        struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
                const u8 *mac, u32 tid, u32 buf_size);
        struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
                const u8 *mac, u32 tid,
                u32 status);
        struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
                const u8 *mac, u32 tid, u32 initiator,
                u32 reason);
        struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
                u32 tim_ie_offset, struct sk_buff *bcn,
                u32 prb_caps, u32 prb_erp,
                void *prb_ies, size_t prb_ies_len);
        struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
                struct sk_buff *bcn);
};

int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);

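/* Inline wrappers around the ops above. Each wrapper returns -EOPNOTSUPP
 * when the backend does not implement the corresponding op. The gen_* ops
 * return either a ready-to-send sk_buff or an ERR_PTR, which the wrapper
 * propagates; on success the buffer is submitted with the backend-specific
 * command id taken from ar->wmi.cmd.
 */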
static inline int
ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
{
        if (WARN_ON_ONCE(!ar->wmi.ops->rx))
                return -EOPNOTSUPP;

        ar->wmi.ops->rx(ar, skb);
        return 0;
}

static inline int
ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
                size_t len)
{
        if (!ar->wmi.ops->map_svc)
                return -EOPNOTSUPP;

        ar->wmi.ops->map_svc(in, out, len);
        return 0;
}

static inline int
ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_scan_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_scan)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_scan(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_mgmt_rx_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_mgmt_rx)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_ch_info_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_ch_info)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_ch_info(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_vdev_start_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_vdev_start)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_peer_kick_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_peer_kick)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_swba_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_swba)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_swba(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_phyerr_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_phyerr)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_phyerr(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_svc_rdy_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_svc_rdy)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_rdy_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_rdy)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
                struct ath10k_fw_stats *stats)
{
        if (!ar->wmi.ops->pull_fw_stats)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
}

static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
        struct sk_buff *skb;
        int ret;

        if (!ar->wmi.ops->gen_mgmt_tx)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
        if (ret)
                return ret;

        /* FIXME There's no ACK event for Management Tx. This probably
         * shouldn't be called here either. */
        info->flags |= IEEE80211_TX_STAT_ACK;
        ieee80211_tx_status_irqsafe(ar->hw, msdu);

        return 0;
}

static inline int
ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
                u16 ctl2g, u16 ctl5g,
                enum wmi_dfs_region dfs_reg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_set_rd)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
                        dfs_reg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->pdev_set_regdomain_cmdid);
}

static inline int
ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_suspend)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}

static inline int
ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_resume)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_resume(ar);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}

static inline int
ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_set_param)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}

static inline int
ath10k_wmi_cmd_init(struct ath10k *ar)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_init)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_init(ar);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
}

static inline int
ath10k_wmi_start_scan(struct ath10k *ar,
                const struct wmi_start_scan_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_start_scan)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_start_scan(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}

static inline int
ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_stop_scan)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_stop_scan(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}

static inline int
ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
                enum wmi_vdev_type type,
                enum wmi_vdev_subtype subtype,
                const u8 macaddr[ETH_ALEN])
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_create)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}

static inline int
ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_delete)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}

static inline int
ath10k_wmi_vdev_start(struct ath10k *ar,
                const struct wmi_vdev_start_request_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_start)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->vdev_start_request_cmdid);
}

static inline int
ath10k_wmi_vdev_restart(struct ath10k *ar,
                const struct wmi_vdev_start_request_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_start)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->vdev_restart_request_cmdid);
}

static inline int
ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_stop)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}

static inline int
ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_up)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}

static inline int
ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_down)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}

static inline int
ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
                u32 param_value)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_set_param)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
                        param_value);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}

static inline int
ath10k_wmi_vdev_install_key(struct ath10k *ar,
                const struct wmi_vdev_install_key_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_install_key)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->vdev_install_key_cmdid);
}

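/* Note: unlike the other wrappers, the spectral scan helpers below call
 * their gen_* ops without checking for a missing op first; callers rely on
 * the backend providing gen_vdev_spectral_conf/enable.
 */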
static inline int
ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
                const struct wmi_vdev_spectral_conf_arg *arg)
{
        struct sk_buff *skb;
        u32 cmd_id;

        skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
                u32 enable)
{
        struct sk_buff *skb;
        u32 cmd_id;

        skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
                        enable);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
                const u8 peer_addr[ETH_ALEN])
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_peer_create)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}

static inline int
ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
                const u8 peer_addr[ETH_ALEN])
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_peer_delete)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}

static inline int
ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
                const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_peer_flush)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}

static inline int
ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
                enum wmi_peer_param param_id, u32 param_value)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_peer_set_param)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
                        param_value);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}

static inline int
ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
                enum wmi_sta_ps_mode psmode)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_set_psmode)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->sta_powersave_mode_cmdid);
}

static inline int
ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
                enum wmi_sta_powersave_param param_id, u32 value)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_set_sta_ps)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->sta_powersave_param_cmdid);
}

static inline int
ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
                enum wmi_ap_ps_peer_param param_id, u32 value)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_set_ap_ps)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->ap_ps_peer_param_cmdid);
}

static inline int
ath10k_wmi_scan_chan_list(struct ath10k *ar,
                const struct wmi_scan_chan_list_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_scan_chan_list)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}

static inline int
ath10k_wmi_peer_assoc(struct ath10k *ar,
                const struct wmi_peer_assoc_complete_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_peer_assoc)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}

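/* Beacons are submitted with ath10k_wmi_cmd_send_nowait(); the wrapper
 * frees the generated buffer itself if the submission fails.
 */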
static inline int
ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif)
{
        struct ath10k *ar = arvif->ar;
        struct sk_buff *skb;
        int ret;

        if (!ar->wmi.ops->gen_beacon_dma)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_beacon_dma(arvif);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        ret = ath10k_wmi_cmd_send_nowait(ar, skb,
                        ar->wmi.cmd->pdev_send_bcn_cmdid);
        if (ret) {
                dev_kfree_skb(skb);
                return ret;
        }

        return 0;
}

static inline int
ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
                const struct wmi_pdev_set_wmm_params_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_set_wmm)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}

static inline int
ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_request_stats)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_request_stats(ar, stats_id);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}

static inline int
ath10k_wmi_force_fw_hang(struct ath10k *ar,
                enum wmi_force_fw_hang_type type, u32 delay_ms)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_force_fw_hang)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}

static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_dbglog_cfg)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pktlog_enable)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->pdev_pktlog_enable_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pktlog_disable)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pktlog_disable(ar);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->pdev_pktlog_disable_cmdid);
}

static inline int
ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
                u32 next_offset, u32 enabled)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
                        next_offset, enabled);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
}

static inline int
ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_get_temperature)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->pdev_get_temperature_cmdid);
}

static inline int
ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_addba_clear_resp)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->addba_clear_resp_cmdid);
}

static inline int
ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
                u32 tid, u32 buf_size)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_addba_send)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->addba_send_cmdid);
}

static inline int
ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
                u32 tid, u32 status)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_addba_set_resp)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->addba_set_resp_cmdid);
}

static inline int
ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
                u32 tid, u32 initiator, u32 reason)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_delba_send)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
                        reason);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                        ar->wmi.cmd->delba_send_cmdid);
}

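/* Template based beacon and probe response setup. gen_bcn_tmpl carries the
 * beacon template together with probe response capability/ERP/IE data;
 * gen_prb_tmpl, added by this change, carries a standalone probe response
 * template.
 */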
static inline int
ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
                struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
                void *prb_ies, size_t prb_ies_len)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_bcn_tmpl)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
                        prb_caps, prb_erp, prb_ies,
                        prb_ies_len);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
}

static inline int
ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_prb_tmpl)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
}

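/* Illustrative sketch only, not part of this header: a caller such as the
 * mac layer could push a probe response template roughly as below. It
 * assumes mac80211's ieee80211_proberesp_get() and an ath10k_vif that
 * carries vif and vdev_id, so treat the exact names as hypothetical.
 *
 *	struct sk_buff *prb;
 *	int ret;
 *
 *	prb = ieee80211_proberesp_get(ar->hw, arvif->vif);
 *	if (!prb)
 *		return -EPERM;
 *
 *	ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb);
 *	kfree_skb(prb);
 *	if (ret)
 *		ath10k_warn(ar, "failed to submit probe resp template: %d\n",
 *			    ret);
 */
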
#endif