/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */
17 #include <linux/pci.h>
18 #include <linux/sched.h>
19 #include <linux/wait.h>
20 #include <linux/delay.h>
22 #include <linux/mei.h>
29 * mei_me_cl_by_uuid - locate index of me client
32 * returns me client index or -ENOENT if not found
34 int mei_me_cl_by_uuid(const struct mei_device
*dev
, const uuid_le
*uuid
)
38 for (i
= 0; i
< dev
->me_clients_num
; ++i
)
39 if (uuid_le_cmp(*uuid
,
40 dev
->me_clients
[i
].props
.protocol_name
) == 0) {
50 * mei_me_cl_by_id return index to me_clients for client_id
52 * @dev: the device structure
53 * @client_id: me client id
55 * Locking: called under "dev->device_lock" lock
57 * returns index on success, -ENOENT on failure.
60 int mei_me_cl_by_id(struct mei_device
*dev
, u8 client_id
)
63 for (i
= 0; i
< dev
->me_clients_num
; i
++)
64 if (dev
->me_clients
[i
].client_id
== client_id
)
66 if (WARN_ON(dev
->me_clients
[i
].client_id
!= client_id
))
69 if (i
== dev
->me_clients_num
)
77 * mei_io_list_flush - removes list entry belonging to cl.
79 * @list: An instance of our list structure
82 void mei_io_list_flush(struct mei_cl_cb
*list
, struct mei_cl
*cl
)
85 struct mei_cl_cb
*next
;
87 list_for_each_entry_safe(cb
, next
, &list
->list
, list
) {
88 if (cb
->cl
&& mei_cl_cmp_id(cl
, cb
->cl
))
94 * mei_io_cb_free - free mei_cb_private related memory
96 * @cb: mei callback struct
98 void mei_io_cb_free(struct mei_cl_cb
*cb
)
103 kfree(cb
->request_buffer
.data
);
104 kfree(cb
->response_buffer
.data
);
109 * mei_io_cb_init - allocate and initialize io callback
112 * @fp: pointer to file structure
114 * returns mei_cl_cb pointer or NULL;
116 struct mei_cl_cb
*mei_io_cb_init(struct mei_cl
*cl
, struct file
*fp
)
118 struct mei_cl_cb
*cb
;
120 cb
= kzalloc(sizeof(struct mei_cl_cb
), GFP_KERNEL
);
124 mei_io_list_init(cb
);
126 cb
->file_object
= fp
;
133 * mei_io_cb_alloc_req_buf - allocate request buffer
135 * @cb: io callback structure
136 * @length: size of the buffer
138 * returns 0 on success
139 * -EINVAL if cb is NULL
140 * -ENOMEM if allocation failed
142 int mei_io_cb_alloc_req_buf(struct mei_cl_cb
*cb
, size_t length
)
150 cb
->request_buffer
.data
= kmalloc(length
, GFP_KERNEL
);
151 if (!cb
->request_buffer
.data
)
153 cb
->request_buffer
.size
= length
;
157 * mei_io_cb_alloc_resp_buf - allocate respose buffer
159 * @cb: io callback structure
160 * @length: size of the buffer
162 * returns 0 on success
163 * -EINVAL if cb is NULL
164 * -ENOMEM if allocation failed
166 int mei_io_cb_alloc_resp_buf(struct mei_cl_cb
*cb
, size_t length
)
174 cb
->response_buffer
.data
= kmalloc(length
, GFP_KERNEL
);
175 if (!cb
->response_buffer
.data
)
177 cb
->response_buffer
.size
= length
;
184 * mei_cl_flush_queues - flushes queue lists belonging to cl.
188 int mei_cl_flush_queues(struct mei_cl
*cl
)
190 if (WARN_ON(!cl
|| !cl
->dev
))
193 dev_dbg(&cl
->dev
->pdev
->dev
, "remove list entry belonging to cl\n");
194 mei_io_list_flush(&cl
->dev
->read_list
, cl
);
195 mei_io_list_flush(&cl
->dev
->write_list
, cl
);
196 mei_io_list_flush(&cl
->dev
->write_waiting_list
, cl
);
197 mei_io_list_flush(&cl
->dev
->ctrl_wr_list
, cl
);
198 mei_io_list_flush(&cl
->dev
->ctrl_rd_list
, cl
);
199 mei_io_list_flush(&cl
->dev
->amthif_cmd_list
, cl
);
200 mei_io_list_flush(&cl
->dev
->amthif_rd_complete_list
, cl
);
206 * mei_cl_init - initializes intialize cl.
208 * @cl: host client to be initialized
211 void mei_cl_init(struct mei_cl
*cl
, struct mei_device
*dev
)
213 memset(cl
, 0, sizeof(struct mei_cl
));
214 init_waitqueue_head(&cl
->wait
);
215 init_waitqueue_head(&cl
->rx_wait
);
216 init_waitqueue_head(&cl
->tx_wait
);
217 INIT_LIST_HEAD(&cl
->link
);
218 INIT_LIST_HEAD(&cl
->device_link
);
219 cl
->reading_state
= MEI_IDLE
;
220 cl
->writing_state
= MEI_IDLE
;
225 * mei_cl_allocate - allocates cl structure and sets it up.
228 * returns The allocated file or NULL on failure
230 struct mei_cl
*mei_cl_allocate(struct mei_device
*dev
)
234 cl
= kmalloc(sizeof(struct mei_cl
), GFP_KERNEL
);
238 mei_cl_init(cl
, dev
);
244 * mei_cl_find_read_cb - find this cl's callback in the read list
248 * returns cb on success, NULL on error
250 struct mei_cl_cb
*mei_cl_find_read_cb(struct mei_cl
*cl
)
252 struct mei_device
*dev
= cl
->dev
;
253 struct mei_cl_cb
*cb
= NULL
;
254 struct mei_cl_cb
*next
= NULL
;
256 list_for_each_entry_safe(cb
, next
, &dev
->read_list
.list
, list
)
257 if (mei_cl_cmp_id(cl
, cb
->cl
))
262 /** mei_cl_link: allocte host id in the host map
265 * @id - fixed host id or -1 for genereting one
267 * returns 0 on success
268 * -EINVAL on incorrect values
269 * -ENONET if client not found
271 int mei_cl_link(struct mei_cl
*cl
, int id
)
273 struct mei_device
*dev
;
275 if (WARN_ON(!cl
|| !cl
->dev
))
280 /* If Id is not asigned get one*/
281 if (id
== MEI_HOST_CLIENT_ID_ANY
)
282 id
= find_first_zero_bit(dev
->host_clients_map
,
285 if (id
>= MEI_CLIENTS_MAX
) {
286 dev_err(&dev
->pdev
->dev
, "id exceded %d", MEI_CLIENTS_MAX
) ;
290 dev
->open_handle_count
++;
292 cl
->host_client_id
= id
;
293 list_add_tail(&cl
->link
, &dev
->file_list
);
295 set_bit(id
, dev
->host_clients_map
);
297 cl
->state
= MEI_FILE_INITIALIZING
;
299 dev_dbg(&dev
->pdev
->dev
, "link cl host id = %d\n", cl
->host_client_id
);
304 * mei_cl_unlink - remove me_cl from the list
308 int mei_cl_unlink(struct mei_cl
*cl
)
310 struct mei_device
*dev
;
311 struct mei_cl
*pos
, *next
;
313 /* don't shout on error exit path */
317 /* wd and amthif might not be initialized */
323 list_for_each_entry_safe(pos
, next
, &dev
->file_list
, link
) {
324 if (cl
->host_client_id
== pos
->host_client_id
) {
325 dev_dbg(&dev
->pdev
->dev
, "remove host client = %d, ME client = %d\n",
326 pos
->host_client_id
, pos
->me_client_id
);
327 list_del_init(&pos
->link
);
335 void mei_host_client_init(struct work_struct
*work
)
337 struct mei_device
*dev
= container_of(work
,
338 struct mei_device
, init_work
);
339 struct mei_client_properties
*client_props
;
342 mutex_lock(&dev
->device_lock
);
344 bitmap_zero(dev
->host_clients_map
, MEI_CLIENTS_MAX
);
345 dev
->open_handle_count
= 0;
348 * Reserving the first three client IDs
349 * 0: Reserved for MEI Bus Message communications
350 * 1: Reserved for Watchdog
351 * 2: Reserved for AMTHI
353 bitmap_set(dev
->host_clients_map
, 0, 3);
355 for (i
= 0; i
< dev
->me_clients_num
; i
++) {
356 client_props
= &dev
->me_clients
[i
].props
;
358 if (!uuid_le_cmp(client_props
->protocol_name
, mei_amthif_guid
))
359 mei_amthif_host_init(dev
);
360 else if (!uuid_le_cmp(client_props
->protocol_name
, mei_wd_guid
))
361 mei_wd_host_init(dev
);
362 else if (!uuid_le_cmp(client_props
->protocol_name
, mei_nfc_guid
))
363 mei_nfc_host_init(dev
);
367 dev
->dev_state
= MEI_DEV_ENABLED
;
369 mutex_unlock(&dev
->device_lock
);
374 * mei_cl_disconnect - disconnect host clinet form the me one
378 * Locking: called under "dev->device_lock" lock
380 * returns 0 on success, <0 on failure.
382 int mei_cl_disconnect(struct mei_cl
*cl
)
384 struct mei_device
*dev
;
385 struct mei_cl_cb
*cb
;
388 if (WARN_ON(!cl
|| !cl
->dev
))
393 if (cl
->state
!= MEI_FILE_DISCONNECTING
)
396 cb
= mei_io_cb_init(cl
, NULL
);
400 cb
->fop_type
= MEI_FOP_CLOSE
;
401 if (dev
->hbuf_is_ready
) {
402 dev
->hbuf_is_ready
= false;
403 if (mei_hbm_cl_disconnect_req(dev
, cl
)) {
405 dev_err(&dev
->pdev
->dev
, "failed to disconnect.\n");
408 mdelay(10); /* Wait for hardware disconnection ready */
409 list_add_tail(&cb
->list
, &dev
->ctrl_rd_list
.list
);
411 dev_dbg(&dev
->pdev
->dev
, "add disconnect cb to control write list\n");
412 list_add_tail(&cb
->list
, &dev
->ctrl_wr_list
.list
);
415 mutex_unlock(&dev
->device_lock
);
417 err
= wait_event_timeout(dev
->wait_recvd_msg
,
418 MEI_FILE_DISCONNECTED
== cl
->state
,
419 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT
));
421 mutex_lock(&dev
->device_lock
);
422 if (MEI_FILE_DISCONNECTED
== cl
->state
) {
424 dev_dbg(&dev
->pdev
->dev
, "successfully disconnected from FW client.\n");
427 if (MEI_FILE_DISCONNECTED
!= cl
->state
)
428 dev_dbg(&dev
->pdev
->dev
, "wrong status client disconnect.\n");
431 dev_dbg(&dev
->pdev
->dev
,
432 "wait failed disconnect err=%08x\n",
435 dev_dbg(&dev
->pdev
->dev
, "failed to disconnect from FW client.\n");
438 mei_io_list_flush(&dev
->ctrl_rd_list
, cl
);
439 mei_io_list_flush(&dev
->ctrl_wr_list
, cl
);
447 * mei_cl_is_other_connecting - checks if other
448 * client with the same me client id is connecting
450 * @cl: private data of the file object
452 * returns ture if other client is connected, 0 - otherwise.
454 bool mei_cl_is_other_connecting(struct mei_cl
*cl
)
456 struct mei_device
*dev
;
460 if (WARN_ON(!cl
|| !cl
->dev
))
465 list_for_each_entry_safe(pos
, next
, &dev
->file_list
, link
) {
466 if ((pos
->state
== MEI_FILE_CONNECTING
) &&
467 (pos
!= cl
) && cl
->me_client_id
== pos
->me_client_id
)
476 * mei_cl_connect - connect host clinet to the me one
480 * Locking: called under "dev->device_lock" lock
482 * returns 0 on success, <0 on failure.
484 int mei_cl_connect(struct mei_cl
*cl
, struct file
*file
)
486 struct mei_device
*dev
;
487 struct mei_cl_cb
*cb
;
490 if (WARN_ON(!cl
|| !cl
->dev
))
495 cb
= mei_io_cb_init(cl
, file
);
501 cb
->fop_type
= MEI_FOP_IOCTL
;
503 if (dev
->hbuf_is_ready
&& !mei_cl_is_other_connecting(cl
)) {
504 dev
->hbuf_is_ready
= false;
506 if (mei_hbm_cl_connect_req(dev
, cl
)) {
510 cl
->timer_count
= MEI_CONNECT_TIMEOUT
;
511 list_add_tail(&cb
->list
, &dev
->ctrl_rd_list
.list
);
513 list_add_tail(&cb
->list
, &dev
->ctrl_wr_list
.list
);
516 mutex_unlock(&dev
->device_lock
);
517 rets
= wait_event_timeout(dev
->wait_recvd_msg
,
518 (cl
->state
== MEI_FILE_CONNECTED
||
519 cl
->state
== MEI_FILE_DISCONNECTED
),
520 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT
));
521 mutex_lock(&dev
->device_lock
);
523 if (cl
->state
!= MEI_FILE_CONNECTED
) {
526 mei_io_list_flush(&dev
->ctrl_rd_list
, cl
);
527 mei_io_list_flush(&dev
->ctrl_wr_list
, cl
);
539 * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
541 * @cl: private data of the file object
543 * returns 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
544 * -ENOENT if mei_cl is not present
545 * -EINVAL if single_recv_buf == 0
547 int mei_cl_flow_ctrl_creds(struct mei_cl
*cl
)
549 struct mei_device
*dev
;
552 if (WARN_ON(!cl
|| !cl
->dev
))
557 if (!dev
->me_clients_num
)
560 if (cl
->mei_flow_ctrl_creds
> 0)
563 for (i
= 0; i
< dev
->me_clients_num
; i
++) {
564 struct mei_me_client
*me_cl
= &dev
->me_clients
[i
];
565 if (me_cl
->client_id
== cl
->me_client_id
) {
566 if (me_cl
->mei_flow_ctrl_creds
) {
567 if (WARN_ON(me_cl
->props
.single_recv_buf
== 0))
579 * mei_cl_flow_ctrl_reduce - reduces flow_control.
581 * @cl: private data of the file object
585 * -ENOENT when me client is not found
586 * -EINVAL when ctrl credits are <= 0
588 int mei_cl_flow_ctrl_reduce(struct mei_cl
*cl
)
590 struct mei_device
*dev
;
593 if (WARN_ON(!cl
|| !cl
->dev
))
598 if (!dev
->me_clients_num
)
601 for (i
= 0; i
< dev
->me_clients_num
; i
++) {
602 struct mei_me_client
*me_cl
= &dev
->me_clients
[i
];
603 if (me_cl
->client_id
== cl
->me_client_id
) {
604 if (me_cl
->props
.single_recv_buf
!= 0) {
605 if (WARN_ON(me_cl
->mei_flow_ctrl_creds
<= 0))
607 dev
->me_clients
[i
].mei_flow_ctrl_creds
--;
609 if (WARN_ON(cl
->mei_flow_ctrl_creds
<= 0))
611 cl
->mei_flow_ctrl_creds
--;
620 * mei_cl_read_start - the start read client message function.
624 * returns 0 on success, <0 on failure.
626 int mei_cl_read_start(struct mei_cl
*cl
, size_t length
)
628 struct mei_device
*dev
;
629 struct mei_cl_cb
*cb
;
633 if (WARN_ON(!cl
|| !cl
->dev
))
638 if (!mei_cl_is_connected(cl
))
642 dev_dbg(&dev
->pdev
->dev
, "read is pending.\n");
645 i
= mei_me_cl_by_id(dev
, cl
->me_client_id
);
647 dev_err(&dev
->pdev
->dev
, "no such me client %d\n",
652 cb
= mei_io_cb_init(cl
, NULL
);
656 /* always allocate at least client max message */
657 length
= max_t(size_t, length
, dev
->me_clients
[i
].props
.max_msg_length
);
658 rets
= mei_io_cb_alloc_resp_buf(cb
, length
);
662 cb
->fop_type
= MEI_FOP_READ
;
664 if (dev
->hbuf_is_ready
) {
665 dev
->hbuf_is_ready
= false;
666 if (mei_hbm_cl_flow_control_req(dev
, cl
)) {
670 list_add_tail(&cb
->list
, &dev
->read_list
.list
);
672 list_add_tail(&cb
->list
, &dev
->ctrl_wr_list
.list
);
681 * mei_cl_irq_write_complete - write a message to device
682 * from the interrupt thread context
685 * @cb: callback block.
686 * @slots: free slots.
687 * @cmpl_list: complete list.
689 * returns 0, OK; otherwise error.
691 int mei_cl_irq_write_complete(struct mei_cl
*cl
, struct mei_cl_cb
*cb
,
692 s32
*slots
, struct mei_cl_cb
*cmpl_list
)
694 struct mei_device
*dev
= cl
->dev
;
695 struct mei_msg_hdr mei_hdr
;
696 size_t len
= cb
->request_buffer
.size
- cb
->buf_idx
;
697 u32 msg_slots
= mei_data2slots(len
);
699 mei_hdr
.host_addr
= cl
->host_client_id
;
700 mei_hdr
.me_addr
= cl
->me_client_id
;
701 mei_hdr
.reserved
= 0;
703 if (*slots
>= msg_slots
) {
704 mei_hdr
.length
= len
;
705 mei_hdr
.msg_complete
= 1;
706 /* Split the message only if we can write the whole host buffer */
707 } else if (*slots
== dev
->hbuf_depth
) {
709 len
= (*slots
* sizeof(u32
)) - sizeof(struct mei_msg_hdr
);
710 mei_hdr
.length
= len
;
711 mei_hdr
.msg_complete
= 0;
713 /* wait for next time the host buffer is empty */
717 dev_dbg(&dev
->pdev
->dev
, "buf: size = %d idx = %lu\n",
718 cb
->request_buffer
.size
, cb
->buf_idx
);
719 dev_dbg(&dev
->pdev
->dev
, MEI_HDR_FMT
, MEI_HDR_PRM(&mei_hdr
));
722 if (mei_write_message(dev
, &mei_hdr
,
723 cb
->request_buffer
.data
+ cb
->buf_idx
)) {
724 cl
->status
= -ENODEV
;
725 list_move_tail(&cb
->list
, &cmpl_list
->list
);
730 cl
->writing_state
= MEI_WRITING
;
731 cb
->buf_idx
+= mei_hdr
.length
;
733 if (mei_hdr
.msg_complete
) {
734 if (mei_cl_flow_ctrl_reduce(cl
))
736 list_move_tail(&cb
->list
, &dev
->write_waiting_list
.list
);
743 * mei_cl_write - submit a write cb to mei device
744 assumes device_lock is locked
747 * @cl: write callback with filled data
749 * returns numbe of bytes sent on success, <0 on failure.
751 int mei_cl_write(struct mei_cl
*cl
, struct mei_cl_cb
*cb
, bool blocking
)
753 struct mei_device
*dev
;
754 struct mei_msg_data
*buf
;
755 struct mei_msg_hdr mei_hdr
;
759 if (WARN_ON(!cl
|| !cl
->dev
))
768 buf
= &cb
->request_buffer
;
770 dev_dbg(&dev
->pdev
->dev
, "mei_cl_write %d\n", buf
->size
);
773 cb
->fop_type
= MEI_FOP_WRITE
;
775 rets
= mei_cl_flow_ctrl_creds(cl
);
779 /* Host buffer is not ready, we queue the request */
780 if (rets
== 0 || !dev
->hbuf_is_ready
) {
782 /* unseting complete will enqueue the cb for write */
783 mei_hdr
.msg_complete
= 0;
788 dev
->hbuf_is_ready
= false;
790 /* Check for a maximum length */
791 if (buf
->size
> mei_hbuf_max_len(dev
)) {
792 mei_hdr
.length
= mei_hbuf_max_len(dev
);
793 mei_hdr
.msg_complete
= 0;
795 mei_hdr
.length
= buf
->size
;
796 mei_hdr
.msg_complete
= 1;
799 mei_hdr
.host_addr
= cl
->host_client_id
;
800 mei_hdr
.me_addr
= cl
->me_client_id
;
801 mei_hdr
.reserved
= 0;
803 dev_dbg(&dev
->pdev
->dev
, "write " MEI_HDR_FMT
"\n",
804 MEI_HDR_PRM(&mei_hdr
));
807 if (mei_write_message(dev
, &mei_hdr
, buf
->data
)) {
812 cl
->writing_state
= MEI_WRITING
;
813 cb
->buf_idx
= mei_hdr
.length
;
817 if (mei_hdr
.msg_complete
) {
818 if (mei_cl_flow_ctrl_reduce(cl
)) {
822 list_add_tail(&cb
->list
, &dev
->write_waiting_list
.list
);
824 list_add_tail(&cb
->list
, &dev
->write_list
.list
);
828 if (blocking
&& cl
->writing_state
!= MEI_WRITE_COMPLETE
) {
830 mutex_unlock(&dev
->device_lock
);
831 if (wait_event_interruptible(cl
->tx_wait
,
832 cl
->writing_state
== MEI_WRITE_COMPLETE
)) {
833 if (signal_pending(current
))
838 mutex_lock(&dev
->device_lock
);
846 * mei_cl_complete - processes completed operation for a client
848 * @cl: private data of the file object.
849 * @cb: callback block.
851 void mei_cl_complete(struct mei_cl
*cl
, struct mei_cl_cb
*cb
)
853 if (cb
->fop_type
== MEI_FOP_WRITE
) {
856 cl
->writing_state
= MEI_WRITE_COMPLETE
;
857 if (waitqueue_active(&cl
->tx_wait
))
858 wake_up_interruptible(&cl
->tx_wait
);
860 } else if (cb
->fop_type
== MEI_FOP_READ
&&
861 MEI_READING
== cl
->reading_state
) {
862 cl
->reading_state
= MEI_READ_COMPLETE
;
863 if (waitqueue_active(&cl
->rx_wait
))
864 wake_up_interruptible(&cl
->rx_wait
);
866 mei_cl_bus_rx_event(cl
);
873 * mei_cl_all_disconnect - disconnect forcefully all connected clients
878 void mei_cl_all_disconnect(struct mei_device
*dev
)
880 struct mei_cl
*cl
, *next
;
882 list_for_each_entry_safe(cl
, next
, &dev
->file_list
, link
) {
883 cl
->state
= MEI_FILE_DISCONNECTED
;
884 cl
->mei_flow_ctrl_creds
= 0;
892 * mei_cl_all_wakeup - wake up all readers and writers they can be interrupted
896 void mei_cl_all_wakeup(struct mei_device
*dev
)
898 struct mei_cl
*cl
, *next
;
899 list_for_each_entry_safe(cl
, next
, &dev
->file_list
, link
) {
900 if (waitqueue_active(&cl
->rx_wait
)) {
901 dev_dbg(&dev
->pdev
->dev
, "Waking up reading client!\n");
902 wake_up_interruptible(&cl
->rx_wait
);
904 if (waitqueue_active(&cl
->tx_wait
)) {
905 dev_dbg(&dev
->pdev
->dev
, "Waking up writing client!\n");
906 wake_up_interruptible(&cl
->tx_wait
);
912 * mei_cl_all_write_clear - clear all pending writes
916 void mei_cl_all_write_clear(struct mei_device
*dev
)
918 struct mei_cl_cb
*cb
, *next
;
920 list_for_each_entry_safe(cb
, next
, &dev
->write_list
.list
, list
) {
This page took 0.052372 seconds and 5 git commands to generate.