1 /******************************************************************************
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
8 * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
33 * Copyright(c) 2010 - 2013 Intel Corporation. All rights reserved.
34 * All rights reserved.
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
62 *****************************************************************************/
64 #include <linux/export.h>
65 #include <net/netlink.h>
70 #include "iwl-trans.h"
73 #include "iwl-testmode.h"
/*
 * Periphery registers absolute lower bound. This is used in order to
 * differentiate register access through HBUS_TARG_PRPH_* and
 * HBUS_TARG_MEM_* accesses.
 */
#define IWL_ABS_PRPH_START (0xA00000)
83 * The TLVs used in the gnl message policy between the kernel module and
84 * user space application. iwl_testmode_gnl_msg_policy is to be carried
85 * through the NL80211_CMD_TESTMODE channel regulated by nl80211.
89 struct nla_policy iwl_testmode_gnl_msg_policy
[IWL_TM_ATTR_MAX
] = {
90 [IWL_TM_ATTR_COMMAND
] = { .type
= NLA_U32
, },
92 [IWL_TM_ATTR_UCODE_CMD_ID
] = { .type
= NLA_U8
, },
93 [IWL_TM_ATTR_UCODE_CMD_DATA
] = { .type
= NLA_UNSPEC
, },
95 [IWL_TM_ATTR_REG_OFFSET
] = { .type
= NLA_U32
, },
96 [IWL_TM_ATTR_REG_VALUE8
] = { .type
= NLA_U8
, },
97 [IWL_TM_ATTR_REG_VALUE32
] = { .type
= NLA_U32
, },
99 [IWL_TM_ATTR_SYNC_RSP
] = { .type
= NLA_UNSPEC
, },
100 [IWL_TM_ATTR_UCODE_RX_PKT
] = { .type
= NLA_UNSPEC
, },
102 [IWL_TM_ATTR_EEPROM
] = { .type
= NLA_UNSPEC
, },
104 [IWL_TM_ATTR_TRACE_ADDR
] = { .type
= NLA_UNSPEC
, },
105 [IWL_TM_ATTR_TRACE_DUMP
] = { .type
= NLA_UNSPEC
, },
106 [IWL_TM_ATTR_TRACE_SIZE
] = { .type
= NLA_U32
, },
108 [IWL_TM_ATTR_FIXRATE
] = { .type
= NLA_U32
, },
110 [IWL_TM_ATTR_UCODE_OWNER
] = { .type
= NLA_U8
, },
112 [IWL_TM_ATTR_MEM_ADDR
] = { .type
= NLA_U32
, },
113 [IWL_TM_ATTR_BUFFER_SIZE
] = { .type
= NLA_U32
, },
114 [IWL_TM_ATTR_BUFFER_DUMP
] = { .type
= NLA_UNSPEC
, },
116 [IWL_TM_ATTR_FW_VERSION
] = { .type
= NLA_U32
, },
117 [IWL_TM_ATTR_DEVICE_ID
] = { .type
= NLA_U32
, },
118 [IWL_TM_ATTR_FW_TYPE
] = { .type
= NLA_U32
, },
119 [IWL_TM_ATTR_FW_INST_SIZE
] = { .type
= NLA_U32
, },
120 [IWL_TM_ATTR_FW_DATA_SIZE
] = { .type
= NLA_U32
, },
122 [IWL_TM_ATTR_ENABLE_NOTIFICATION
] = {.type
= NLA_FLAG
, },
125 static inline void iwl_test_trace_clear(struct iwl_test
*tst
)
127 memset(&tst
->trace
, 0, sizeof(struct iwl_test_trace
));
130 static void iwl_test_trace_stop(struct iwl_test
*tst
)
132 if (!tst
->trace
.enabled
)
135 if (tst
->trace
.cpu_addr
&& tst
->trace
.dma_addr
)
136 dma_free_coherent(tst
->trans
->dev
,
139 tst
->trace
.dma_addr
);
141 iwl_test_trace_clear(tst
);
144 static inline void iwl_test_mem_clear(struct iwl_test
*tst
)
146 memset(&tst
->mem
, 0, sizeof(struct iwl_test_mem
));
149 static inline void iwl_test_mem_stop(struct iwl_test
*tst
)
151 if (!tst
->mem
.in_read
)
154 iwl_test_mem_clear(tst
);
158 * Initializes the test object
159 * During the lifetime of the test object it is assumed that the transport is
160 * started. The test object should be stopped before the transport is stopped.
162 void iwl_test_init(struct iwl_test
*tst
, struct iwl_trans
*trans
,
163 struct iwl_test_ops
*ops
)
168 iwl_test_trace_clear(tst
);
169 iwl_test_mem_clear(tst
);
171 EXPORT_SYMBOL_GPL(iwl_test_init
);
/*
 * Stop the test object: tear down any in-progress memory dump and any
 * active FW trace before the object goes away.
 */
void iwl_test_free(struct iwl_test *tst)
{
	iwl_test_mem_stop(tst);
	iwl_test_trace_stop(tst);
}
EXPORT_SYMBOL_GPL(iwl_test_free);
183 static inline int iwl_test_send_cmd(struct iwl_test
*tst
,
184 struct iwl_host_cmd
*cmd
)
186 return tst
->ops
->send_cmd(tst
->trans
->op_mode
, cmd
);
189 static inline bool iwl_test_valid_hw_addr(struct iwl_test
*tst
, u32 addr
)
191 return tst
->ops
->valid_hw_addr(addr
);
194 static inline u32
iwl_test_fw_ver(struct iwl_test
*tst
)
196 return tst
->ops
->get_fw_ver(tst
->trans
->op_mode
);
199 static inline struct sk_buff
*
200 iwl_test_alloc_reply(struct iwl_test
*tst
, int len
)
202 return tst
->ops
->alloc_reply(tst
->trans
->op_mode
, len
);
205 static inline int iwl_test_reply(struct iwl_test
*tst
, struct sk_buff
*skb
)
207 return tst
->ops
->reply(tst
->trans
->op_mode
, skb
);
210 static inline struct sk_buff
*
211 iwl_test_alloc_event(struct iwl_test
*tst
, int len
)
213 return tst
->ops
->alloc_event(tst
->trans
->op_mode
, len
);
217 iwl_test_event(struct iwl_test
*tst
, struct sk_buff
*skb
)
219 return tst
->ops
->event(tst
->trans
->op_mode
, skb
);
223 * This function handles the user application commands to the fw. The fw
224 * commands are sent in a synchronuous manner. In case that the user requested
225 * to get commands response, it is send to the user.
227 static int iwl_test_fw_cmd(struct iwl_test
*tst
, struct nlattr
**tb
)
229 struct iwl_host_cmd cmd
;
230 struct iwl_rx_packet
*pkt
;
237 memset(&cmd
, 0, sizeof(struct iwl_host_cmd
));
239 if (!tb
[IWL_TM_ATTR_UCODE_CMD_ID
] ||
240 !tb
[IWL_TM_ATTR_UCODE_CMD_DATA
]) {
241 IWL_ERR(tst
->trans
, "Missing fw command mandatory fields\n");
245 cmd
.flags
= CMD_ON_DEMAND
| CMD_SYNC
;
246 cmd_want_skb
= nla_get_flag(tb
[IWL_TM_ATTR_UCODE_CMD_SKB
]);
248 cmd
.flags
|= CMD_WANT_SKB
;
250 cmd
.id
= nla_get_u8(tb
[IWL_TM_ATTR_UCODE_CMD_ID
]);
251 cmd
.data
[0] = nla_data(tb
[IWL_TM_ATTR_UCODE_CMD_DATA
]);
252 cmd
.len
[0] = nla_len(tb
[IWL_TM_ATTR_UCODE_CMD_DATA
]);
253 cmd
.dataflags
[0] = IWL_HCMD_DFL_NOCOPY
;
254 IWL_DEBUG_INFO(tst
->trans
, "test fw cmd=0x%x, flags 0x%x, len %d\n",
255 cmd
.id
, cmd
.flags
, cmd
.len
[0]);
257 ret
= iwl_test_send_cmd(tst
, &cmd
);
259 IWL_ERR(tst
->trans
, "Failed to send hcmd\n");
265 /* Handling return of SKB to the user */
268 IWL_ERR(tst
->trans
, "HCMD received a null response packet\n");
272 reply_len
= le32_to_cpu(pkt
->len_n_flags
) & FH_RSCSR_FRAME_SIZE_MSK
;
273 skb
= iwl_test_alloc_reply(tst
, reply_len
+ 20);
274 reply_buf
= kmalloc(reply_len
, GFP_KERNEL
);
275 if (!skb
|| !reply_buf
) {
281 /* The reply is in a page, that we cannot send to user space. */
282 memcpy(reply_buf
, &(pkt
->hdr
), reply_len
);
285 if (nla_put_u32(skb
, IWL_TM_ATTR_COMMAND
,
286 IWL_TM_CMD_DEV2APP_UCODE_RX_PKT
) ||
287 nla_put(skb
, IWL_TM_ATTR_UCODE_RX_PKT
, reply_len
, reply_buf
))
288 goto nla_put_failure
;
289 return iwl_test_reply(tst
, skb
);
292 IWL_DEBUG_INFO(tst
->trans
, "Failed creating NL attributes\n");
299 * Handles the user application commands for register access.
301 static int iwl_test_reg(struct iwl_test
*tst
, struct nlattr
**tb
)
307 struct iwl_trans
*trans
= tst
->trans
;
309 if (!tb
[IWL_TM_ATTR_REG_OFFSET
]) {
310 IWL_ERR(trans
, "Missing reg offset\n");
314 ofs
= nla_get_u32(tb
[IWL_TM_ATTR_REG_OFFSET
]);
315 IWL_DEBUG_INFO(trans
, "test reg access cmd offset=0x%x\n", ofs
);
317 cmd
= nla_get_u32(tb
[IWL_TM_ATTR_COMMAND
]);
320 * Allow access only to FH/CSR/HBUS in direct mode.
321 * Since we don't have the upper bounds for the CSR and HBUS segments,
322 * we will use only the upper bound of FH for sanity check.
324 if (ofs
>= FH_MEM_UPPER_BOUND
) {
325 IWL_ERR(trans
, "offset out of segment (0x0 - 0x%x)\n",
331 case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32
:
332 val32
= iwl_read_direct32(tst
->trans
, ofs
);
333 IWL_DEBUG_INFO(trans
, "32 value to read 0x%x\n", val32
);
335 skb
= iwl_test_alloc_reply(tst
, 20);
337 IWL_ERR(trans
, "Memory allocation fail\n");
340 if (nla_put_u32(skb
, IWL_TM_ATTR_REG_VALUE32
, val32
))
341 goto nla_put_failure
;
342 status
= iwl_test_reply(tst
, skb
);
344 IWL_ERR(trans
, "Error sending msg : %d\n", status
);
347 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32
:
348 if (!tb
[IWL_TM_ATTR_REG_VALUE32
]) {
349 IWL_ERR(trans
, "Missing value to write\n");
352 val32
= nla_get_u32(tb
[IWL_TM_ATTR_REG_VALUE32
]);
353 IWL_DEBUG_INFO(trans
, "32b write val=0x%x\n", val32
);
354 iwl_write_direct32(tst
->trans
, ofs
, val32
);
358 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8
:
359 if (!tb
[IWL_TM_ATTR_REG_VALUE8
]) {
360 IWL_ERR(trans
, "Missing value to write\n");
363 val8
= nla_get_u8(tb
[IWL_TM_ATTR_REG_VALUE8
]);
364 IWL_DEBUG_INFO(trans
, "8b write val=0x%x\n", val8
);
365 iwl_write8(tst
->trans
, ofs
, val8
);
370 IWL_ERR(trans
, "Unknown test register cmd ID\n");
382 * Handles the request to start FW tracing. Allocates of the trace buffer
383 * and sends a reply to user space with the address of the allocated buffer.
385 static int iwl_test_trace_begin(struct iwl_test
*tst
, struct nlattr
**tb
)
390 if (tst
->trace
.enabled
)
393 if (!tb
[IWL_TM_ATTR_TRACE_SIZE
])
394 tst
->trace
.size
= TRACE_BUFF_SIZE_DEF
;
397 nla_get_u32(tb
[IWL_TM_ATTR_TRACE_SIZE
]);
399 if (!tst
->trace
.size
)
402 if (tst
->trace
.size
< TRACE_BUFF_SIZE_MIN
||
403 tst
->trace
.size
> TRACE_BUFF_SIZE_MAX
)
406 tst
->trace
.tsize
= tst
->trace
.size
+ TRACE_BUFF_PADD
;
407 tst
->trace
.cpu_addr
= dma_alloc_coherent(tst
->trans
->dev
,
409 &tst
->trace
.dma_addr
,
411 if (!tst
->trace
.cpu_addr
)
414 tst
->trace
.enabled
= true;
415 tst
->trace
.trace_addr
= (u8
*)PTR_ALIGN(tst
->trace
.cpu_addr
, 0x100);
417 memset(tst
->trace
.trace_addr
, 0x03B, tst
->trace
.size
);
419 skb
= iwl_test_alloc_reply(tst
, sizeof(tst
->trace
.dma_addr
) + 20);
421 IWL_ERR(tst
->trans
, "Memory allocation fail\n");
422 iwl_test_trace_stop(tst
);
426 if (nla_put(skb
, IWL_TM_ATTR_TRACE_ADDR
,
427 sizeof(tst
->trace
.dma_addr
),
428 (u64
*)&tst
->trace
.dma_addr
))
429 goto nla_put_failure
;
431 status
= iwl_test_reply(tst
, skb
);
433 IWL_ERR(tst
->trans
, "Error sending msg : %d\n", status
);
435 tst
->trace
.nchunks
= DIV_ROUND_UP(tst
->trace
.size
,
442 if (nla_get_u32(tb
[IWL_TM_ATTR_COMMAND
]) ==
443 IWL_TM_CMD_APP2DEV_BEGIN_TRACE
)
444 iwl_test_trace_stop(tst
);
449 * Handles indirect read from the periphery or the SRAM. The read is performed
450 * to a temporary buffer. The user space application should later issue a dump
452 static int iwl_test_indirect_read(struct iwl_test
*tst
, u32 addr
, u32 size
)
454 struct iwl_trans
*trans
= tst
->trans
;
461 tst
->mem
.size
= size
;
462 tst
->mem
.addr
= kmalloc(tst
->mem
.size
, GFP_KERNEL
);
463 if (tst
->mem
.addr
== NULL
)
466 /* Hard-coded periphery absolute address */
467 if (IWL_ABS_PRPH_START
<= addr
&&
468 addr
< IWL_ABS_PRPH_START
+ PRPH_END
) {
469 if (!iwl_trans_grab_nic_access(trans
, false, &flags
)) {
472 iwl_write32(trans
, HBUS_TARG_PRPH_RADDR
,
474 for (i
= 0; i
< size
; i
+= 4)
475 *(u32
*)(tst
->mem
.addr
+ i
) =
476 iwl_read32(trans
, HBUS_TARG_PRPH_RDAT
);
477 iwl_trans_release_nic_access(trans
, &flags
);
478 } else { /* target memory (SRAM) */
479 iwl_trans_read_mem(trans
, addr
, tst
->mem
.addr
,
484 DIV_ROUND_UP(tst
->mem
.size
, DUMP_CHUNK_SIZE
);
485 tst
->mem
.in_read
= true;
491 * Handles indirect write to the periphery or SRAM. The is performed to a
494 static int iwl_test_indirect_write(struct iwl_test
*tst
, u32 addr
,
495 u32 size
, unsigned char *buf
)
497 struct iwl_trans
*trans
= tst
->trans
;
501 if (IWL_ABS_PRPH_START
<= addr
&&
502 addr
< IWL_ABS_PRPH_START
+ PRPH_END
) {
503 /* Periphery writes can be 1-3 bytes long, or DWORDs */
505 memcpy(&val
, buf
, size
);
506 if (!iwl_trans_grab_nic_access(trans
, false, &flags
))
508 iwl_write32(trans
, HBUS_TARG_PRPH_WADDR
,
509 (addr
& 0x0000FFFF) |
511 iwl_write32(trans
, HBUS_TARG_PRPH_WDAT
, val
);
512 iwl_trans_release_nic_access(trans
, &flags
);
516 for (i
= 0; i
< size
; i
+= 4)
517 iwl_write_prph(trans
, addr
+i
,
520 } else if (iwl_test_valid_hw_addr(tst
, addr
)) {
521 iwl_trans_write_mem(trans
, addr
, buf
, size
/ 4);
529 * Handles the user application commands for indirect read/write
530 * to/from the periphery or the SRAM.
532 static int iwl_test_indirect_mem(struct iwl_test
*tst
, struct nlattr
**tb
)
537 /* Both read and write should be blocked, for atomicity */
538 if (tst
->mem
.in_read
)
541 cmd
= nla_get_u32(tb
[IWL_TM_ATTR_COMMAND
]);
542 if (!tb
[IWL_TM_ATTR_MEM_ADDR
]) {
543 IWL_ERR(tst
->trans
, "Error finding memory offset address\n");
546 addr
= nla_get_u32(tb
[IWL_TM_ATTR_MEM_ADDR
]);
547 if (!tb
[IWL_TM_ATTR_BUFFER_SIZE
]) {
548 IWL_ERR(tst
->trans
, "Error finding size for memory reading\n");
551 size
= nla_get_u32(tb
[IWL_TM_ATTR_BUFFER_SIZE
]);
553 if (cmd
== IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ
) {
554 return iwl_test_indirect_read(tst
, addr
, size
);
556 if (!tb
[IWL_TM_ATTR_BUFFER_DUMP
])
558 buf
= (unsigned char *)nla_data(tb
[IWL_TM_ATTR_BUFFER_DUMP
]);
559 return iwl_test_indirect_write(tst
, addr
, size
, buf
);
564 * Enable notifications to user space
566 static int iwl_test_notifications(struct iwl_test
*tst
,
569 tst
->notify
= nla_get_flag(tb
[IWL_TM_ATTR_ENABLE_NOTIFICATION
]);
574 * Handles the request to get the device id
576 static int iwl_test_get_dev_id(struct iwl_test
*tst
, struct nlattr
**tb
)
578 u32 devid
= tst
->trans
->hw_id
;
582 IWL_DEBUG_INFO(tst
->trans
, "hw version: 0x%x\n", devid
);
584 skb
= iwl_test_alloc_reply(tst
, 20);
586 IWL_ERR(tst
->trans
, "Memory allocation fail\n");
590 if (nla_put_u32(skb
, IWL_TM_ATTR_DEVICE_ID
, devid
))
591 goto nla_put_failure
;
592 status
= iwl_test_reply(tst
, skb
);
594 IWL_ERR(tst
->trans
, "Error sending msg : %d\n", status
);
604 * Handles the request to get the FW version
606 static int iwl_test_get_fw_ver(struct iwl_test
*tst
, struct nlattr
**tb
)
610 u32 ver
= iwl_test_fw_ver(tst
);
612 IWL_DEBUG_INFO(tst
->trans
, "uCode version raw: 0x%x\n", ver
);
614 skb
= iwl_test_alloc_reply(tst
, 20);
616 IWL_ERR(tst
->trans
, "Memory allocation fail\n");
620 if (nla_put_u32(skb
, IWL_TM_ATTR_FW_VERSION
, ver
))
621 goto nla_put_failure
;
623 status
= iwl_test_reply(tst
, skb
);
625 IWL_ERR(tst
->trans
, "Error sending msg : %d\n", status
);
635 * Parse the netlink message and validate that the IWL_TM_ATTR_CMD exists
637 int iwl_test_parse(struct iwl_test
*tst
, struct nlattr
**tb
,
642 result
= nla_parse(tb
, IWL_TM_ATTR_MAX
- 1, data
, len
,
643 iwl_testmode_gnl_msg_policy
);
645 IWL_ERR(tst
->trans
, "Fail parse gnl msg: %d\n", result
);
649 /* IWL_TM_ATTR_COMMAND is absolutely mandatory */
650 if (!tb
[IWL_TM_ATTR_COMMAND
]) {
651 IWL_ERR(tst
->trans
, "Missing testmode command type\n");
656 EXPORT_SYMBOL_GPL(iwl_test_parse
);
659 * Handle test commands.
660 * Returns 1 for unknown commands (not handled by the test object); negative
661 * value in case of error.
663 int iwl_test_handle_cmd(struct iwl_test
*tst
, struct nlattr
**tb
)
667 switch (nla_get_u32(tb
[IWL_TM_ATTR_COMMAND
])) {
668 case IWL_TM_CMD_APP2DEV_UCODE
:
669 IWL_DEBUG_INFO(tst
->trans
, "test cmd to uCode\n");
670 result
= iwl_test_fw_cmd(tst
, tb
);
673 case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32
:
674 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32
:
675 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8
:
676 IWL_DEBUG_INFO(tst
->trans
, "test cmd to register\n");
677 result
= iwl_test_reg(tst
, tb
);
680 case IWL_TM_CMD_APP2DEV_BEGIN_TRACE
:
681 IWL_DEBUG_INFO(tst
->trans
, "test uCode trace cmd to driver\n");
682 result
= iwl_test_trace_begin(tst
, tb
);
685 case IWL_TM_CMD_APP2DEV_END_TRACE
:
686 iwl_test_trace_stop(tst
);
690 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ
:
691 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE
:
692 IWL_DEBUG_INFO(tst
->trans
, "test indirect memory cmd\n");
693 result
= iwl_test_indirect_mem(tst
, tb
);
696 case IWL_TM_CMD_APP2DEV_NOTIFICATIONS
:
697 IWL_DEBUG_INFO(tst
->trans
, "test notifications cmd\n");
698 result
= iwl_test_notifications(tst
, tb
);
701 case IWL_TM_CMD_APP2DEV_GET_FW_VERSION
:
702 IWL_DEBUG_INFO(tst
->trans
, "test get FW ver cmd\n");
703 result
= iwl_test_get_fw_ver(tst
, tb
);
706 case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID
:
707 IWL_DEBUG_INFO(tst
->trans
, "test Get device ID cmd\n");
708 result
= iwl_test_get_dev_id(tst
, tb
);
712 IWL_DEBUG_INFO(tst
->trans
, "Unknown test command\n");
718 EXPORT_SYMBOL_GPL(iwl_test_handle_cmd
);
720 static int iwl_test_trace_dump(struct iwl_test
*tst
, struct sk_buff
*skb
,
721 struct netlink_callback
*cb
)
725 if (!tst
->trace
.enabled
|| !tst
->trace
.trace_addr
)
729 if (idx
>= tst
->trace
.nchunks
)
732 length
= DUMP_CHUNK_SIZE
;
733 if (((idx
+ 1) == tst
->trace
.nchunks
) &&
734 (tst
->trace
.size
% DUMP_CHUNK_SIZE
))
735 length
= tst
->trace
.size
%
738 if (nla_put(skb
, IWL_TM_ATTR_TRACE_DUMP
, length
,
739 tst
->trace
.trace_addr
+ (DUMP_CHUNK_SIZE
* idx
)))
740 goto nla_put_failure
;
749 static int iwl_test_buffer_dump(struct iwl_test
*tst
, struct sk_buff
*skb
,
750 struct netlink_callback
*cb
)
754 if (!tst
->mem
.in_read
)
758 if (idx
>= tst
->mem
.nchunks
) {
759 iwl_test_mem_stop(tst
);
763 length
= DUMP_CHUNK_SIZE
;
764 if (((idx
+ 1) == tst
->mem
.nchunks
) &&
765 (tst
->mem
.size
% DUMP_CHUNK_SIZE
))
766 length
= tst
->mem
.size
% DUMP_CHUNK_SIZE
;
768 if (nla_put(skb
, IWL_TM_ATTR_BUFFER_DUMP
, length
,
769 tst
->mem
.addr
+ (DUMP_CHUNK_SIZE
* idx
)))
770 goto nla_put_failure
;
780 * Handle dump commands.
781 * Returns 1 for unknown commands (not handled by the test object); negative
782 * value in case of error.
784 int iwl_test_dump(struct iwl_test
*tst
, u32 cmd
, struct sk_buff
*skb
,
785 struct netlink_callback
*cb
)
790 case IWL_TM_CMD_APP2DEV_READ_TRACE
:
791 IWL_DEBUG_INFO(tst
->trans
, "uCode trace cmd\n");
792 result
= iwl_test_trace_dump(tst
, skb
, cb
);
795 case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP
:
796 IWL_DEBUG_INFO(tst
->trans
, "testmode sram dump cmd\n");
797 result
= iwl_test_buffer_dump(tst
, skb
, cb
);
806 EXPORT_SYMBOL_GPL(iwl_test_dump
);
809 * Multicast a spontaneous messages from the device to the user space.
811 static void iwl_test_send_rx(struct iwl_test
*tst
,
812 struct iwl_rx_cmd_buffer
*rxb
)
815 struct iwl_rx_packet
*data
;
818 data
= rxb_addr(rxb
);
819 length
= le32_to_cpu(data
->len_n_flags
) & FH_RSCSR_FRAME_SIZE_MSK
;
821 /* the length doesn't include len_n_flags field, so add it manually */
822 length
+= sizeof(__le32
);
824 skb
= iwl_test_alloc_event(tst
, length
+ 20);
826 IWL_ERR(tst
->trans
, "Out of memory for message to user\n");
830 if (nla_put_u32(skb
, IWL_TM_ATTR_COMMAND
,
831 IWL_TM_CMD_DEV2APP_UCODE_RX_PKT
) ||
832 nla_put(skb
, IWL_TM_ATTR_UCODE_RX_PKT
, length
, data
))
833 goto nla_put_failure
;
835 iwl_test_event(tst
, skb
);
840 IWL_ERR(tst
->trans
, "Ouch, overran buffer, check allocation!\n");
844 * Called whenever a Rx frames is recevied from the device. If notifications to
845 * the user space are requested, sends the frames to the user.
847 void iwl_test_rx(struct iwl_test
*tst
, struct iwl_rx_cmd_buffer
*rxb
)
850 iwl_test_send_rx(tst
, rxb
);
852 EXPORT_SYMBOL_GPL(iwl_test_rx
);