/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"
33 * mei_irq_compl_handler - dispatch complete handlers
34 * for the completed callbacks
37 * @compl_list - list of completed cbs
39 void mei_irq_compl_handler(struct mei_device
*dev
, struct mei_cl_cb
*compl_list
)
41 struct mei_cl_cb
*cb
, *next
;
44 list_for_each_entry_safe(cb
, next
, &compl_list
->list
, list
) {
50 dev_dbg(&dev
->pdev
->dev
, "completing call back.\n");
51 if (cl
== &dev
->iamthif_cl
)
52 mei_amthif_complete(dev
, cb
);
54 mei_cl_complete(cl
, cb
);
57 EXPORT_SYMBOL_GPL(mei_irq_compl_handler
);
60 * mei_cl_hbm_equal - check if hbm is addressed to the client
63 * @mei_hdr: header of mei client message
65 * returns true if matches, false otherwise
67 static inline int mei_cl_hbm_equal(struct mei_cl
*cl
,
68 struct mei_msg_hdr
*mei_hdr
)
70 return cl
->host_client_id
== mei_hdr
->host_addr
&&
71 cl
->me_client_id
== mei_hdr
->me_addr
;
74 * mei_cl_is_reading - checks if the client
75 is the one to read this message
78 * @mei_hdr: header of mei message
80 * returns true on match and false otherwise
82 static bool mei_cl_is_reading(struct mei_cl
*cl
, struct mei_msg_hdr
*mei_hdr
)
84 return mei_cl_hbm_equal(cl
, mei_hdr
) &&
85 cl
->state
== MEI_FILE_CONNECTED
&&
86 cl
->reading_state
!= MEI_READ_COMPLETE
;
90 * mei_irq_read_client_message - process client message
92 * @dev: the device structure
93 * @mei_hdr: header of mei client message
94 * @complete_list: An instance of our list structure
96 * returns 0 on success, <0 on failure.
98 static int mei_cl_irq_read_msg(struct mei_device
*dev
,
99 struct mei_msg_hdr
*mei_hdr
,
100 struct mei_cl_cb
*complete_list
)
103 struct mei_cl_cb
*cb
, *next
;
104 unsigned char *buffer
= NULL
;
106 list_for_each_entry_safe(cb
, next
, &dev
->read_list
.list
, list
) {
108 if (!cl
|| !mei_cl_is_reading(cl
, mei_hdr
))
111 cl
->reading_state
= MEI_READING
;
113 if (cb
->response_buffer
.size
== 0 ||
114 cb
->response_buffer
.data
== NULL
) {
115 cl_err(dev
, cl
, "response buffer is not allocated.\n");
120 if (cb
->response_buffer
.size
< mei_hdr
->length
+ cb
->buf_idx
) {
121 cl_dbg(dev
, cl
, "message overflow. size %d len %d idx %ld\n",
122 cb
->response_buffer
.size
,
123 mei_hdr
->length
, cb
->buf_idx
);
124 buffer
= krealloc(cb
->response_buffer
.data
,
125 mei_hdr
->length
+ cb
->buf_idx
,
129 cl_err(dev
, cl
, "allocation failed.\n");
133 cb
->response_buffer
.data
= buffer
;
134 cb
->response_buffer
.size
=
135 mei_hdr
->length
+ cb
->buf_idx
;
138 buffer
= cb
->response_buffer
.data
+ cb
->buf_idx
;
139 mei_read_slots(dev
, buffer
, mei_hdr
->length
);
141 cb
->buf_idx
+= mei_hdr
->length
;
142 if (mei_hdr
->msg_complete
) {
145 cl_dbg(dev
, cl
, "completed read length = %lu\n",
147 list_add_tail(&cb
->list
, &complete_list
->list
);
152 dev_dbg(&dev
->pdev
->dev
, "message read\n");
154 mei_read_slots(dev
, dev
->rd_msg_buf
, mei_hdr
->length
);
155 dev_dbg(&dev
->pdev
->dev
, "discarding message " MEI_HDR_FMT
"\n",
156 MEI_HDR_PRM(mei_hdr
));
163 * mei_cl_irq_disconnect_rsp - send disconnection response message
166 * @cb: callback block.
167 * @cmpl_list: complete list.
169 * returns 0, OK; otherwise, error.
171 static int mei_cl_irq_disconnect_rsp(struct mei_cl
*cl
, struct mei_cl_cb
*cb
,
172 struct mei_cl_cb
*cmpl_list
)
174 struct mei_device
*dev
= cl
->dev
;
179 slots
= mei_hbuf_empty_slots(dev
);
180 msg_slots
= mei_data2slots(sizeof(struct hbm_client_connect_response
));
182 if (slots
< msg_slots
)
185 ret
= mei_hbm_cl_disconnect_rsp(dev
, cl
);
187 cl
->state
= MEI_FILE_DISCONNECTED
;
198 * mei_cl_irq_close - processes close related operation from
199 * interrupt thread context - send disconnect request
202 * @cb: callback block.
203 * @cmpl_list: complete list.
205 * returns 0, OK; otherwise, error.
207 static int mei_cl_irq_close(struct mei_cl
*cl
, struct mei_cl_cb
*cb
,
208 struct mei_cl_cb
*cmpl_list
)
210 struct mei_device
*dev
= cl
->dev
;
214 msg_slots
= mei_data2slots(sizeof(struct hbm_client_connect_request
));
215 slots
= mei_hbuf_empty_slots(dev
);
217 if (slots
< msg_slots
)
220 if (mei_hbm_cl_disconnect_req(dev
, cl
)) {
223 list_move_tail(&cb
->list
, &cmpl_list
->list
);
227 cl
->state
= MEI_FILE_DISCONNECTING
;
230 list_move_tail(&cb
->list
, &dev
->ctrl_rd_list
.list
);
231 cl
->timer_count
= MEI_CONNECT_TIMEOUT
;
238 * mei_cl_irq_close - processes client read related operation from the
239 * interrupt thread context - request for flow control credits
242 * @cb: callback block.
243 * @cmpl_list: complete list.
245 * returns 0, OK; otherwise, error.
247 static int mei_cl_irq_read(struct mei_cl
*cl
, struct mei_cl_cb
*cb
,
248 struct mei_cl_cb
*cmpl_list
)
250 struct mei_device
*dev
= cl
->dev
;
255 msg_slots
= mei_data2slots(sizeof(struct hbm_flow_control
));
256 slots
= mei_hbuf_empty_slots(dev
);
258 if (slots
< msg_slots
)
261 ret
= mei_hbm_cl_flow_control_req(dev
, cl
);
265 list_move_tail(&cb
->list
, &cmpl_list
->list
);
269 list_move_tail(&cb
->list
, &dev
->read_list
.list
);
276 * mei_cl_irq_connect - send connect request in irq_thread context
279 * @cb: callback block.
280 * @cmpl_list: complete list.
282 * returns 0, OK; otherwise, error.
284 static int mei_cl_irq_connect(struct mei_cl
*cl
, struct mei_cl_cb
*cb
,
285 struct mei_cl_cb
*cmpl_list
)
287 struct mei_device
*dev
= cl
->dev
;
292 msg_slots
= mei_data2slots(sizeof(struct hbm_client_connect_request
));
293 slots
= mei_hbuf_empty_slots(dev
);
295 if (mei_cl_is_other_connecting(cl
))
298 if (slots
< msg_slots
)
301 cl
->state
= MEI_FILE_CONNECTING
;
303 ret
= mei_hbm_cl_connect_req(dev
, cl
);
311 list_move_tail(&cb
->list
, &dev
->ctrl_rd_list
.list
);
312 cl
->timer_count
= MEI_CONNECT_TIMEOUT
;
318 * mei_irq_read_handler - bottom half read routine after ISR to
319 * handle the read processing.
321 * @dev: the device structure
322 * @cmpl_list: An instance of our list structure
323 * @slots: slots to read.
325 * returns 0 on success, <0 on failure.
327 int mei_irq_read_handler(struct mei_device
*dev
,
328 struct mei_cl_cb
*cmpl_list
, s32
*slots
)
330 struct mei_msg_hdr
*mei_hdr
;
334 if (!dev
->rd_msg_hdr
) {
335 dev
->rd_msg_hdr
= mei_read_hdr(dev
);
337 dev_dbg(&dev
->pdev
->dev
, "slots =%08x.\n", *slots
);
339 mei_hdr
= (struct mei_msg_hdr
*) &dev
->rd_msg_hdr
;
340 dev_dbg(&dev
->pdev
->dev
, MEI_HDR_FMT
, MEI_HDR_PRM(mei_hdr
));
342 if (mei_hdr
->reserved
|| !dev
->rd_msg_hdr
) {
343 dev_err(&dev
->pdev
->dev
, "corrupted message header 0x%08X\n",
349 if (mei_slots2data(*slots
) < mei_hdr
->length
) {
350 dev_err(&dev
->pdev
->dev
, "less data available than length=%08x.\n",
352 /* we can't read the message */
358 if (mei_hdr
->host_addr
== 0 && mei_hdr
->me_addr
== 0) {
359 ret
= mei_hbm_dispatch(dev
, mei_hdr
);
361 dev_dbg(&dev
->pdev
->dev
, "mei_hbm_dispatch failed ret = %d\n",
368 /* find recipient cl */
369 list_for_each_entry(cl
, &dev
->file_list
, link
) {
370 if (mei_cl_hbm_equal(cl
, mei_hdr
)) {
371 cl_dbg(dev
, cl
, "got a message\n");
376 /* if no recipient cl was found we assume corrupted header */
377 if (&cl
->link
== &dev
->file_list
) {
378 dev_err(&dev
->pdev
->dev
, "no destination client found 0x%08X\n",
384 if (mei_hdr
->host_addr
== dev
->iamthif_cl
.host_client_id
&&
385 MEI_FILE_CONNECTED
== dev
->iamthif_cl
.state
&&
386 dev
->iamthif_state
== MEI_IAMTHIF_READING
) {
388 ret
= mei_amthif_irq_read_msg(dev
, mei_hdr
, cmpl_list
);
390 dev_err(&dev
->pdev
->dev
, "mei_amthif_irq_read_msg failed = %d\n",
395 ret
= mei_cl_irq_read_msg(dev
, mei_hdr
, cmpl_list
);
397 dev_err(&dev
->pdev
->dev
, "mei_cl_irq_read_msg failed = %d\n",
404 /* reset the number of slots and header */
405 *slots
= mei_count_full_read_slots(dev
);
408 if (*slots
== -EOVERFLOW
) {
409 /* overflow - reset */
410 dev_err(&dev
->pdev
->dev
, "resetting due to slots overflow.\n");
411 /* set the event since message has been read */
418 EXPORT_SYMBOL_GPL(mei_irq_read_handler
);
422 * mei_irq_write_handler - dispatch write requests
425 * @dev: the device structure
426 * @cmpl_list: An instance of our list structure
428 * returns 0 on success, <0 on failure.
430 int mei_irq_write_handler(struct mei_device
*dev
, struct mei_cl_cb
*cmpl_list
)
434 struct mei_cl_cb
*cb
, *next
;
435 struct mei_cl_cb
*list
;
440 if (!mei_hbuf_acquire(dev
))
443 slots
= mei_hbuf_empty_slots(dev
);
447 /* complete all waiting for write CB */
448 dev_dbg(&dev
->pdev
->dev
, "complete all waiting for write cb.\n");
450 list
= &dev
->write_waiting_list
;
451 list_for_each_entry_safe(cb
, next
, &list
->list
, list
) {
458 if (MEI_WRITING
== cl
->writing_state
&&
459 cb
->fop_type
== MEI_FOP_WRITE
&&
460 cl
!= &dev
->iamthif_cl
) {
461 cl_dbg(dev
, cl
, "MEI WRITE COMPLETE\n");
462 cl
->writing_state
= MEI_WRITE_COMPLETE
;
463 list_add_tail(&cb
->list
, &cmpl_list
->list
);
465 if (cl
== &dev
->iamthif_cl
) {
466 cl_dbg(dev
, cl
, "check iamthif flow control.\n");
467 if (dev
->iamthif_flow_control_pending
) {
468 ret
= mei_amthif_irq_read(dev
, &slots
);
475 if (dev
->wd_state
== MEI_WD_STOPPING
) {
476 dev
->wd_state
= MEI_WD_IDLE
;
477 wake_up(&dev
->wait_stop_wd
);
480 if (mei_cl_is_connected(&dev
->wd_cl
)) {
481 if (dev
->wd_pending
&&
482 mei_cl_flow_ctrl_creds(&dev
->wd_cl
) > 0) {
483 ret
= mei_wd_send(dev
);
486 dev
->wd_pending
= false;
490 /* complete control write list CB */
491 dev_dbg(&dev
->pdev
->dev
, "complete control write list cb.\n");
492 list_for_each_entry_safe(cb
, next
, &dev
->ctrl_wr_list
.list
, list
) {
498 switch (cb
->fop_type
) {
500 /* send disconnect message */
501 ret
= mei_cl_irq_close(cl
, cb
, cmpl_list
);
507 /* send flow control message */
508 ret
= mei_cl_irq_read(cl
, cb
, cmpl_list
);
513 case MEI_FOP_CONNECT
:
514 /* connect message */
515 ret
= mei_cl_irq_connect(cl
, cb
, cmpl_list
);
520 case MEI_FOP_DISCONNECT_RSP
:
521 /* send disconnect resp */
522 ret
= mei_cl_irq_disconnect_rsp(cl
, cb
, cmpl_list
);
531 /* complete write list CB */
532 dev_dbg(&dev
->pdev
->dev
, "complete write list cb.\n");
533 list_for_each_entry_safe(cb
, next
, &dev
->write_list
.list
, list
) {
537 if (cl
== &dev
->iamthif_cl
)
538 ret
= mei_amthif_irq_write(cl
, cb
, cmpl_list
);
540 ret
= mei_cl_irq_write(cl
, cb
, cmpl_list
);
546 EXPORT_SYMBOL_GPL(mei_irq_write_handler
);
551 * mei_timer - timer function.
553 * @work: pointer to the work_struct structure
556 void mei_timer(struct work_struct
*work
)
558 unsigned long timeout
;
560 struct mei_cl_cb
*cb_pos
= NULL
;
561 struct mei_cl_cb
*cb_next
= NULL
;
563 struct mei_device
*dev
= container_of(work
,
564 struct mei_device
, timer_work
.work
);
567 mutex_lock(&dev
->device_lock
);
569 /* Catch interrupt stalls during HBM init handshake */
570 if (dev
->dev_state
== MEI_DEV_INIT_CLIENTS
&&
571 dev
->hbm_state
!= MEI_HBM_IDLE
) {
573 if (dev
->init_clients_timer
) {
574 if (--dev
->init_clients_timer
== 0) {
575 dev_err(&dev
->pdev
->dev
, "timer: init clients timeout hbm_state = %d.\n",
583 if (dev
->dev_state
!= MEI_DEV_ENABLED
)
586 /*** connect/disconnect timeouts ***/
587 list_for_each_entry(cl
, &dev
->file_list
, link
) {
588 if (cl
->timer_count
) {
589 if (--cl
->timer_count
== 0) {
590 dev_err(&dev
->pdev
->dev
, "timer: connect/disconnect timeout.\n");
597 if (!mei_cl_is_connected(&dev
->iamthif_cl
))
600 if (dev
->iamthif_stall_timer
) {
601 if (--dev
->iamthif_stall_timer
== 0) {
602 dev_err(&dev
->pdev
->dev
, "timer: amthif hanged.\n");
604 dev
->iamthif_msg_buf_size
= 0;
605 dev
->iamthif_msg_buf_index
= 0;
606 dev
->iamthif_canceled
= false;
607 dev
->iamthif_ioctl
= true;
608 dev
->iamthif_state
= MEI_IAMTHIF_IDLE
;
609 dev
->iamthif_timer
= 0;
611 mei_io_cb_free(dev
->iamthif_current_cb
);
612 dev
->iamthif_current_cb
= NULL
;
614 dev
->iamthif_file_object
= NULL
;
615 mei_amthif_run_next_cmd(dev
);
619 if (dev
->iamthif_timer
) {
621 timeout
= dev
->iamthif_timer
+
622 mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER
);
624 dev_dbg(&dev
->pdev
->dev
, "dev->iamthif_timer = %ld\n",
626 dev_dbg(&dev
->pdev
->dev
, "timeout = %ld\n", timeout
);
627 dev_dbg(&dev
->pdev
->dev
, "jiffies = %ld\n", jiffies
);
628 if (time_after(jiffies
, timeout
)) {
630 * User didn't read the AMTHI data on time (15sec)
631 * freeing AMTHI for other requests
634 dev_dbg(&dev
->pdev
->dev
, "freeing AMTHI for other requests\n");
636 list_for_each_entry_safe(cb_pos
, cb_next
,
637 &dev
->amthif_rd_complete_list
.list
, list
) {
639 cl
= cb_pos
->file_object
->private_data
;
641 /* Finding the AMTHI entry. */
642 if (cl
== &dev
->iamthif_cl
)
643 list_del(&cb_pos
->list
);
645 mei_io_cb_free(dev
->iamthif_current_cb
);
646 dev
->iamthif_current_cb
= NULL
;
648 dev
->iamthif_file_object
->private_data
= NULL
;
649 dev
->iamthif_file_object
= NULL
;
650 dev
->iamthif_timer
= 0;
651 mei_amthif_run_next_cmd(dev
);
656 if (dev
->dev_state
!= MEI_DEV_DISABLED
)
657 schedule_delayed_work(&dev
->timer_work
, 2 * HZ
);
658 mutex_unlock(&dev
->device_lock
);