ACPI 2.0 / ECDT: Split EC_FLAGS_HANDLERS_INSTALLED
[deliverable/linux.git] / drivers / acpi / ec.c
1 /*
2 * ec.c - ACPI Embedded Controller Driver (v3)
3 *
4 * Copyright (C) 2001-2015 Intel Corporation
5 * Author: 2014, 2015 Lv Zheng <lv.zheng@intel.com>
6 * 2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
7 * 2006 Denis Sadykov <denis.m.sadykov@intel.com>
8 * 2004 Luming Yu <luming.yu@intel.com>
9 * 2001, 2002 Andy Grover <andrew.grover@intel.com>
10 * 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
11 * Copyright (C) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
12 *
13 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2 of the License, or (at
18 * your option) any later version.
19 *
20 * This program is distributed in the hope that it will be useful, but
21 * WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 * General Public License for more details.
24 *
25 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
26 */
27
28 /* Uncomment next line to get verbose printout */
29 /* #define DEBUG */
30 #define pr_fmt(fmt) "ACPI : EC: " fmt
31
32 #include <linux/kernel.h>
33 #include <linux/module.h>
34 #include <linux/init.h>
35 #include <linux/types.h>
36 #include <linux/delay.h>
37 #include <linux/interrupt.h>
38 #include <linux/list.h>
39 #include <linux/spinlock.h>
40 #include <linux/slab.h>
41 #include <linux/acpi.h>
42 #include <linux/dmi.h>
43 #include <asm/io.h>
44
45 #include "internal.h"
46
47 #define ACPI_EC_CLASS "embedded_controller"
48 #define ACPI_EC_DEVICE_NAME "Embedded Controller"
49 #define ACPI_EC_FILE_INFO "info"
50
51 /* EC status register */
52 #define ACPI_EC_FLAG_OBF 0x01 /* Output buffer full */
53 #define ACPI_EC_FLAG_IBF 0x02 /* Input buffer full */
54 #define ACPI_EC_FLAG_CMD 0x08 /* Input buffer contains a command */
55 #define ACPI_EC_FLAG_BURST 0x10 /* burst mode */
56 #define ACPI_EC_FLAG_SCI 0x20 /* EC-SCI occurred */
57
58 /*
59 * The SCI_EVT clearing timing is not defined by the ACPI specification.
60 * This leads to lots of practical timing issues for the host EC driver.
61 * The following variations are defined (from the target EC firmware's
62 * perspective):
63 * STATUS: After indicating SCI_EVT edge triggered IRQ to the host, the
64 * target can clear SCI_EVT at any time so long as the host can see
65 * the indication by reading the status register (EC_SC). So the
66 * host should re-check SCI_EVT after the first time the SCI_EVT
67 * indication is seen, which is the same time the query request
68 * (QR_EC) is written to the command register (EC_CMD). SCI_EVT set
69 * at any later time could indicate another event. Normally such
70 * kind of EC firmware has implemented an event queue and will
71 * return 0x00 to indicate "no outstanding event".
72 * QUERY: After seeing the query request (QR_EC) written to the command
73 * register (EC_CMD) by the host and having prepared the responding
74 * event value in the data register (EC_DATA), the target can safely
75 * clear SCI_EVT because the target can confirm that the current
76 * event is being handled by the host. The host then should check
77 * SCI_EVT right after reading the event response from the data
78 * register (EC_DATA).
79 * EVENT: After seeing the event response read from the data register
80 * (EC_DATA) by the host, the target can clear SCI_EVT. As the
81 * target requires time to notice the change in the data register
82 * (EC_DATA), the host may be required to wait additional guarding
83 * time before checking the SCI_EVT again. Such guarding may not be
84 * necessary if the host is notified via another IRQ.
85 */
86 #define ACPI_EC_EVT_TIMING_STATUS 0x00
87 #define ACPI_EC_EVT_TIMING_QUERY 0x01
88 #define ACPI_EC_EVT_TIMING_EVENT 0x02
89
90 /* EC commands */
/* EC command bytes written to the command register (EC_CMD) */
enum ec_command {
	ACPI_EC_COMMAND_READ = 0x80,	/* RD_EC: read a byte of EC space */
	ACPI_EC_COMMAND_WRITE = 0x81,	/* WR_EC: write a byte of EC space */
	ACPI_EC_BURST_ENABLE = 0x82,	/* BE_EC: enter burst mode */
	ACPI_EC_BURST_DISABLE = 0x83,	/* BD_EC: leave burst mode */
	ACPI_EC_COMMAND_QUERY = 0x84,	/* QR_EC: query for a pending event */
};
98
99 #define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
100 #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
#define ACPI_EC_UDELAY_POLL	550	/* Wait 550us for EC transaction polling */
102 #define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query
103 * when trying to clear the EC */
104
/* Bit numbers used with test_bit()/set_bit() on acpi_ec->flags */
enum {
	EC_FLAGS_QUERY_PENDING,		/* Query is pending */
	EC_FLAGS_QUERY_GUARDING,	/* Guard for SCI_EVT check */
	EC_FLAGS_GPE_HANDLER_INSTALLED,	/* GPE handler installed */
	EC_FLAGS_EC_HANDLER_INSTALLED,	/* OpReg handler installed */
	EC_FLAGS_STARTED,		/* Driver is started */
	EC_FLAGS_STOPPED,		/* Driver is stopped */
	EC_FLAGS_COMMAND_STORM,		/* GPE storms occurred to the
					 * current command processing */
};
115
116 #define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */
117 #define ACPI_EC_COMMAND_COMPLETE 0x02 /* Completed last byte */
118
119 /* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
120 static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
121 module_param(ec_delay, uint, 0644);
122 MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");
123
124 static bool ec_busy_polling __read_mostly;
125 module_param(ec_busy_polling, bool, 0644);
126 MODULE_PARM_DESC(ec_busy_polling, "Use busy polling to advance EC transaction");
127
128 static unsigned int ec_polling_guard __read_mostly = ACPI_EC_UDELAY_POLL;
129 module_param(ec_polling_guard, uint, 0644);
130 MODULE_PARM_DESC(ec_polling_guard, "Guard time(us) between EC accesses in polling modes");
131
132 static unsigned int ec_event_clearing __read_mostly = ACPI_EC_EVT_TIMING_QUERY;
133
134 /*
135 * If the number of false interrupts per one transaction exceeds
136 * this threshold, will think there is a GPE storm happened and
137 * will disable the GPE for normal transaction.
138 */
139 static unsigned int ec_storm_threshold __read_mostly = 8;
140 module_param(ec_storm_threshold, uint, 0644);
141 MODULE_PARM_DESC(ec_storm_threshold, "Maxim false GPE numbers not considered as GPE storm");
142
/*
 * A registered handler for one EC query (_Qxx) event value.
 * Entries live on acpi_ec->list (protected by acpi_ec->mutex) and are
 * reference counted via @kref.
 */
struct acpi_ec_query_handler {
	struct list_head node;		/* link in acpi_ec->list */
	acpi_ec_query_func func;	/* callback; preferred when non-NULL */
	acpi_handle handle;		/* _Qxx method, used when func is NULL */
	void *data;			/* opaque argument passed to func */
	u8 query_bit;			/* event value this handler matches */
	struct kref kref;		/* reference count */
};
151
/*
 * State of one in-flight EC command: the bytes to write, the buffer for
 * the response, progress indices and the ACPI_EC_COMMAND_* state flags.
 * While installed as ec->curr it is advanced under ec->lock.
 */
struct transaction {
	const u8 *wdata;		/* bytes sent after the command byte */
	u8 *rdata;			/* buffer receiving the response */
	unsigned short irq_count;	/* false-IRQ count for storm detection */
	u8 command;			/* command byte (enum ec_command) */
	u8 wi;				/* next write index into wdata */
	u8 ri;				/* next read index into rdata */
	u8 wlen;			/* total bytes to write */
	u8 rlen;			/* total bytes to read */
	u8 flags;			/* ACPI_EC_COMMAND_POLL/COMPLETE */
};
163
/* One pending query: its QR_EC transaction, deferred work, and handler. */
struct acpi_ec_query {
	struct transaction transaction;		/* the QR_EC command */
	struct work_struct work;		/* runs acpi_ec_event_processor() */
	struct acpi_ec_query_handler *handler;	/* resolved _Qxx handler */
};
169
170 static int acpi_ec_query(struct acpi_ec *ec, u8 *data);
171 static void advance_transaction(struct acpi_ec *ec);
172 static void acpi_ec_event_handler(struct work_struct *work);
173 static void acpi_ec_event_processor(struct work_struct *work);
174
/* boot_ec: EC described by the ECDT; first_ec: first EC found (used by ec_read()/ec_write()) */
struct acpi_ec *boot_ec, *first_ec;
EXPORT_SYMBOL(first_ec);

/*
 * Platform quirk switches.  NOTE(review): despite the EC_FLAGS_* naming
 * these are plain file-scope variables, unrelated to the bit flags kept
 * in acpi_ec->flags above — presumably set from DMI/boot quirk tables
 * outside this view; confirm before renaming.
 */
static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
182
183 /* --------------------------------------------------------------------------
184 * Logging/Debugging
185 * -------------------------------------------------------------------------- */
186
187 /*
188 * Splitters used by the developers to track the boundary of the EC
189 * handling processes.
190 */
191 #ifdef DEBUG
192 #define EC_DBG_SEP " "
193 #define EC_DBG_DRV "+++++"
194 #define EC_DBG_STM "====="
195 #define EC_DBG_REQ "*****"
196 #define EC_DBG_EVT "#####"
197 #else
198 #define EC_DBG_SEP ""
199 #define EC_DBG_DRV
200 #define EC_DBG_STM
201 #define EC_DBG_REQ
202 #define EC_DBG_EVT
203 #endif
204
205 #define ec_log_raw(fmt, ...) \
206 pr_info(fmt "\n", ##__VA_ARGS__)
207 #define ec_dbg_raw(fmt, ...) \
208 pr_debug(fmt "\n", ##__VA_ARGS__)
209 #define ec_log(filter, fmt, ...) \
210 ec_log_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)
211 #define ec_dbg(filter, fmt, ...) \
212 ec_dbg_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)
213
214 #define ec_log_drv(fmt, ...) \
215 ec_log(EC_DBG_DRV, fmt, ##__VA_ARGS__)
216 #define ec_dbg_drv(fmt, ...) \
217 ec_dbg(EC_DBG_DRV, fmt, ##__VA_ARGS__)
218 #define ec_dbg_stm(fmt, ...) \
219 ec_dbg(EC_DBG_STM, fmt, ##__VA_ARGS__)
220 #define ec_dbg_req(fmt, ...) \
221 ec_dbg(EC_DBG_REQ, fmt, ##__VA_ARGS__)
222 #define ec_dbg_evt(fmt, ...) \
223 ec_dbg(EC_DBG_EVT, fmt, ##__VA_ARGS__)
224 #define ec_dbg_ref(ec, fmt, ...) \
225 ec_dbg_raw("%lu: " fmt, ec->reference_count, ## __VA_ARGS__)
226
227 /* --------------------------------------------------------------------------
228 * Device Flags
229 * -------------------------------------------------------------------------- */
230
231 static bool acpi_ec_started(struct acpi_ec *ec)
232 {
233 return test_bit(EC_FLAGS_STARTED, &ec->flags) &&
234 !test_bit(EC_FLAGS_STOPPED, &ec->flags);
235 }
236
237 static bool acpi_ec_flushed(struct acpi_ec *ec)
238 {
239 return ec->reference_count == 1;
240 }
241
242 /* --------------------------------------------------------------------------
243 * EC Registers
244 * -------------------------------------------------------------------------- */
245
246 static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
247 {
248 u8 x = inb(ec->command_addr);
249
250 ec_dbg_raw("EC_SC(R) = 0x%2.2x "
251 "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d",
252 x,
253 !!(x & ACPI_EC_FLAG_SCI),
254 !!(x & ACPI_EC_FLAG_BURST),
255 !!(x & ACPI_EC_FLAG_CMD),
256 !!(x & ACPI_EC_FLAG_IBF),
257 !!(x & ACPI_EC_FLAG_OBF));
258 return x;
259 }
260
261 static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
262 {
263 u8 x = inb(ec->data_addr);
264
265 ec->timestamp = jiffies;
266 ec_dbg_raw("EC_DATA(R) = 0x%2.2x", x);
267 return x;
268 }
269
270 static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
271 {
272 ec_dbg_raw("EC_SC(W) = 0x%2.2x", command);
273 outb(command, ec->command_addr);
274 ec->timestamp = jiffies;
275 }
276
277 static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
278 {
279 ec_dbg_raw("EC_DATA(W) = 0x%2.2x", data);
280 outb(data, ec->data_addr);
281 ec->timestamp = jiffies;
282 }
283
#ifdef DEBUG
/* Map an EC command byte to its mnemonic (debug builds only). */
static const char *acpi_ec_cmd_string(u8 cmd)
{
	switch (cmd) {
	case ACPI_EC_COMMAND_READ:
		return "RD_EC";
	case ACPI_EC_COMMAND_WRITE:
		return "WR_EC";
	case ACPI_EC_BURST_ENABLE:
		return "BE_EC";
	case ACPI_EC_BURST_DISABLE:
		return "BD_EC";
	case ACPI_EC_COMMAND_QUERY:
		return "QR_EC";
	default:
		return "UNKNOWN";
	}
}
#else
#define acpi_ec_cmd_string(cmd)		"UNDEF"
#endif
304
305 /* --------------------------------------------------------------------------
306 * GPE Registers
307 * -------------------------------------------------------------------------- */
308
309 static inline bool acpi_ec_is_gpe_raised(struct acpi_ec *ec)
310 {
311 acpi_event_status gpe_status = 0;
312
313 (void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status);
314 return (gpe_status & ACPI_EVENT_FLAG_STATUS_SET) ? true : false;
315 }
316
/*
 * Enable the EC GPE.  @open selects the reference-counted ACPICA enable
 * (acpi_enable_gpe) versus raw EN flag flipping (acpi_set_gpe); the raw
 * form is used by the storm handling (acpi_ec_clear_storm) which must
 * not disturb the GPE reference count and requires that a reference is
 * already held.
 */
static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open)
{
	if (open)
		acpi_enable_gpe(NULL, ec->gpe);
	else {
		BUG_ON(ec->reference_count < 1);
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
	}
	if (acpi_ec_is_gpe_raised(ec)) {
		/*
		 * On some platforms, EN=1 writes cannot trigger GPE. So
		 * software need to manually trigger a pseudo GPE event on
		 * EN=1 writes.
		 */
		ec_dbg_raw("Polling quirk");
		advance_transaction(ec);
	}
}
335
336 static inline void acpi_ec_disable_gpe(struct acpi_ec *ec, bool close)
337 {
338 if (close)
339 acpi_disable_gpe(NULL, ec->gpe);
340 else {
341 BUG_ON(ec->reference_count < 1);
342 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
343 }
344 }
345
/* Clear the EC GPE's STS bit, but only if it is actually raised. */
static inline void acpi_ec_clear_gpe(struct acpi_ec *ec)
{
	/*
	 * GPE STS is a W1C register, which means:
	 * 1. Software can clear it without worrying about clearing other
	 *    GPEs' STS bits when the hardware sets them in parallel.
	 * 2. As long as software can ensure only clearing it when it is
	 *    set, hardware won't set it in parallel.
	 * So software can clear GPE in any contexts.
	 * Warning: do not move the check into advance_transaction() as the
	 * EC commands will be sent without GPE raised.
	 */
	if (!acpi_ec_is_gpe_raised(ec))
		return;
	acpi_clear_gpe(NULL, ec->gpe);
}
362
363 /* --------------------------------------------------------------------------
364 * Transaction Management
365 * -------------------------------------------------------------------------- */
366
/*
 * Take a reference on the EC; the first reference enables the GPE (only
 * once a GPE handler has been installed).  All visible callers hold
 * ec->lock when invoking this.
 */
static void acpi_ec_submit_request(struct acpi_ec *ec)
{
	ec->reference_count++;
	if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags) &&
	    ec->reference_count == 1)
		acpi_ec_enable_gpe(ec, true);
}
374
/*
 * Drop a reference on the EC; the last reference disables the GPE.
 * Wakes up a flusher waiting on ec->wait (see acpi_ec_stop()) once only
 * the driver reference remains.  All visible callers hold ec->lock.
 */
static void acpi_ec_complete_request(struct acpi_ec *ec)
{
	bool flushed = false;

	ec->reference_count--;
	if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags) &&
	    ec->reference_count == 0)
		acpi_ec_disable_gpe(ec, true);
	flushed = acpi_ec_flushed(ec);
	if (flushed)
		wake_up(&ec->wait);
}
387
388 static void acpi_ec_set_storm(struct acpi_ec *ec, u8 flag)
389 {
390 if (!test_bit(flag, &ec->flags)) {
391 acpi_ec_disable_gpe(ec, false);
392 ec_dbg_drv("Polling enabled");
393 set_bit(flag, &ec->flags);
394 }
395 }
396
397 static void acpi_ec_clear_storm(struct acpi_ec *ec, u8 flag)
398 {
399 if (test_bit(flag, &ec->flags)) {
400 clear_bit(flag, &ec->flags);
401 acpi_ec_enable_gpe(ec, false);
402 ec_dbg_drv("Polling disabled");
403 }
404 }
405
406 /*
407 * acpi_ec_submit_flushable_request() - Increase the reference count unless
408 * the flush operation is not in
409 * progress
410 * @ec: the EC device
411 *
412 * This function must be used before taking a new action that should hold
413 * the reference count. If this function returns false, then the action
414 * must be discarded or it will prevent the flush operation from being
415 * completed.
416 */
417 static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
418 {
419 if (!acpi_ec_started(ec))
420 return false;
421 acpi_ec_submit_request(ec);
422 return true;
423 }
424
/*
 * Schedule event (SCI_EVT) handling work.  EC_FLAGS_QUERY_PENDING
 * ensures at most one QR_EC submission cycle is in flight; further
 * SCI_EVT indications are absorbed until acpi_ec_complete_query()
 * clears the flag.
 */
static void acpi_ec_submit_query(struct acpi_ec *ec)
{
	if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
		ec_dbg_evt("Command(%s) submitted/blocked",
			   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
		ec->nr_pending_queries++;
		schedule_work(&ec->work);
	}
}
434
435 static void acpi_ec_complete_query(struct acpi_ec *ec)
436 {
437 if (test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
438 clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
439 ec_dbg_evt("Command(%s) unblocked",
440 acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
441 }
442 }
443
/*
 * Return true when the caller should keep guarding (waiting) before
 * re-checking SCI_EVT; this only applies to the "event" clearing model
 * while a query cycle is pending and no QR_EC transaction is active.
 */
static bool acpi_ec_guard_event(struct acpi_ec *ec)
{
	bool guarded = true;
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	/*
	 * If firmware SCI_EVT clearing timing is "event", we actually
	 * don't know when the SCI_EVT will be cleared by firmware after
	 * evaluating _Qxx, so we need to re-check SCI_EVT after waiting an
	 * acceptable period.
	 *
	 * The guarding period begins when EC_FLAGS_QUERY_PENDING is
	 * flagged, which means SCI_EVT check has just been performed.
	 * But if the current transaction is ACPI_EC_COMMAND_QUERY, the
	 * guarding should have already been performed (via
	 * EC_FLAGS_QUERY_GUARDING) and should not be applied so that the
	 * ACPI_EC_COMMAND_QUERY transaction can be transitioned into
	 * ACPI_EC_COMMAND_POLL state immediately.
	 */
	if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
	    ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY ||
	    !test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags) ||
	    (ec->curr && ec->curr->command == ACPI_EC_COMMAND_QUERY))
		guarded = false;
	spin_unlock_irqrestore(&ec->lock, flags);
	return guarded;
}
472
473 static int ec_transaction_polled(struct acpi_ec *ec)
474 {
475 unsigned long flags;
476 int ret = 0;
477
478 spin_lock_irqsave(&ec->lock, flags);
479 if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL))
480 ret = 1;
481 spin_unlock_irqrestore(&ec->lock, flags);
482 return ret;
483 }
484
485 static int ec_transaction_completed(struct acpi_ec *ec)
486 {
487 unsigned long flags;
488 int ret = 0;
489
490 spin_lock_irqsave(&ec->lock, flags);
491 if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
492 ret = 1;
493 spin_unlock_irqrestore(&ec->lock, flags);
494 return ret;
495 }
496
/*
 * Advance ec->curr into @flag (ACPI_EC_COMMAND_POLL or
 * ACPI_EC_COMMAND_COMPLETE) and, for QR_EC transactions, perform the
 * SCI_EVT bookkeeping matching the firmware's event clearing model (see
 * the ACPI_EC_EVT_TIMING_* description at the top of this file).
 * Called from advance_transaction(), i.e. with ec->lock held and
 * ec->curr non-NULL.
 */
static inline void ec_transaction_transition(struct acpi_ec *ec, unsigned long flag)
{
	ec->curr->flags |= flag;
	if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
		/* "status" model: next query allowed once QR_EC is accepted */
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS &&
		    flag == ACPI_EC_COMMAND_POLL)
			acpi_ec_complete_query(ec);
		/* "query" model: next query allowed once QR_EC data is read */
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY &&
		    flag == ACPI_EC_COMMAND_COMPLETE)
			acpi_ec_complete_query(ec);
		/* "event" model: arm the guard before re-checking SCI_EVT */
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
		    flag == ACPI_EC_COMMAND_COMPLETE)
			set_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
	}
}
512
/*
 * Single step of the EC transaction state machine: clear the GPE STS
 * bit, sample the status register and, depending on the state of
 * ec->curr, write the next command/data byte, read a response byte, or
 * account a spurious IRQ for storm detection.  Also submits query
 * handling whenever SCI_EVT is seen set.  Runs in both IRQ and task
 * context (see the in_interrupt() checks); callers visible in this file
 * hold ec->lock.
 */
static void advance_transaction(struct acpi_ec *ec)
{
	struct transaction *t;
	u8 status;
	bool wakeup = false;

	ec_dbg_stm("%s (%d)", in_interrupt() ? "IRQ" : "TASK",
		   smp_processor_id());
	/*
	 * By always clearing STS before handling all indications, we can
	 * ensure a hardware STS 0->1 change after this clearing can always
	 * trigger a GPE interrupt.
	 */
	acpi_ec_clear_gpe(ec);
	status = acpi_ec_read_status(ec);
	t = ec->curr;
	/*
	 * Another IRQ or a guarded polling mode advancement is detected,
	 * the next QR_EC submission is then allowed.
	 */
	if (!t || !(t->flags & ACPI_EC_COMMAND_POLL)) {
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
		    (!ec->nr_pending_queries ||
		     test_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags))) {
			clear_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
			acpi_ec_complete_query(ec);
		}
	}
	if (!t)
		goto err;
	if (t->flags & ACPI_EC_COMMAND_POLL) {
		/* Command byte already sent: stream write data / read response */
		if (t->wlen > t->wi) {
			if ((status & ACPI_EC_FLAG_IBF) == 0)
				acpi_ec_write_data(ec, t->wdata[t->wi++]);
			else
				goto err;
		} else if (t->rlen > t->ri) {
			if ((status & ACPI_EC_FLAG_OBF) == 1) {
				t->rdata[t->ri++] = acpi_ec_read_data(ec);
				if (t->rlen == t->ri) {
					ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
					if (t->command == ACPI_EC_COMMAND_QUERY)
						ec_dbg_evt("Command(%s) completed by hardware",
							   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
					wakeup = true;
				}
			} else
				goto err;
		} else if (t->wlen == t->wi &&
			   (status & ACPI_EC_FLAG_IBF) == 0) {
			/* Write-only transaction fully consumed by the EC */
			ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
			wakeup = true;
		}
		goto out;
	} else {
		/*
		 * Command byte not yet sent.  The QUERY_HANDSHAKE quirk
		 * fakes a "no event" (0x00) reply when SCI_EVT already
		 * dropped, instead of issuing QR_EC.
		 */
		if (EC_FLAGS_QUERY_HANDSHAKE &&
		    !(status & ACPI_EC_FLAG_SCI) &&
		    (t->command == ACPI_EC_COMMAND_QUERY)) {
			ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
			t->rdata[t->ri++] = 0x00;
			ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
			ec_dbg_evt("Command(%s) completed by software",
				   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
			wakeup = true;
		} else if ((status & ACPI_EC_FLAG_IBF) == 0) {
			acpi_ec_write_cmd(ec, t->command);
			ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
		} else
			goto err;
		goto out;
	}
err:
	/*
	 * If SCI bit is set, then don't think it's a false IRQ
	 * otherwise will take a not handled IRQ as a false one.
	 */
	if (!(status & ACPI_EC_FLAG_SCI)) {
		if (in_interrupt() && t) {
			if (t->irq_count < ec_storm_threshold)
				++t->irq_count;
			/* Allow triggering on 0 threshold */
			if (t->irq_count == ec_storm_threshold)
				acpi_ec_set_storm(ec, EC_FLAGS_COMMAND_STORM);
		}
	}
out:
	if (status & ACPI_EC_FLAG_SCI)
		acpi_ec_submit_query(ec);
	if (wakeup && in_interrupt())
		wake_up(&ec->wait);
}
604
605 static void start_transaction(struct acpi_ec *ec)
606 {
607 ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
608 ec->curr->flags = 0;
609 }
610
/*
 * Wait for the current transaction to complete while honoring the
 * polling guard interval (ec_polling_guard microseconds measured from
 * the last EC register access, ec->timestamp).  Returns 0 when the
 * transaction completed within the guarding window, -ETIME otherwise
 * (the caller then advances the transaction manually, see ec_poll()).
 */
static int ec_guard(struct acpi_ec *ec)
{
	unsigned long guard = usecs_to_jiffies(ec_polling_guard);
	unsigned long timeout = ec->timestamp + guard;

	/* Ensure guarding period before polling EC status */
	do {
		if (ec_busy_polling) {
			/* Perform busy polling */
			if (ec_transaction_completed(ec))
				return 0;
			udelay(jiffies_to_usecs(guard));
		} else {
			/*
			 * Perform wait polling
			 * 1. Wait the transaction to be completed by the
			 *    GPE handler after the transaction enters
			 *    ACPI_EC_COMMAND_POLL state.
			 * 2. A special guarding logic is also required
			 *    for event clearing mode "event" before the
			 *    transaction enters ACPI_EC_COMMAND_POLL
			 *    state.
			 */
			if (!ec_transaction_polled(ec) &&
			    !acpi_ec_guard_event(ec))
				break;
			if (wait_event_timeout(ec->wait,
					       ec_transaction_completed(ec),
					       guard))
				return 0;
		}
	} while (time_before(jiffies, timeout));
	return -ETIME;
}
645
/*
 * Drive the current transaction to completion.  Up to 5 attempts, each
 * with an ec_delay millisecond deadline; whenever ec_guard() times out
 * the transaction is advanced manually under ec->lock (polling mode).
 * A fully expired attempt restarts the transaction from scratch.
 * Returns 0 on completion, -ETIME after all attempts expired.
 */
static int ec_poll(struct acpi_ec *ec)
{
	unsigned long flags;
	int repeat = 5; /* number of command restarts */

	while (repeat--) {
		unsigned long delay = jiffies +
			msecs_to_jiffies(ec_delay);
		do {
			if (!ec_guard(ec))
				return 0;
			spin_lock_irqsave(&ec->lock, flags);
			advance_transaction(ec);
			spin_unlock_irqrestore(&ec->lock, flags);
		} while (time_before(jiffies, delay));
		pr_debug("controller reset, restart transaction\n");
		spin_lock_irqsave(&ec->lock, flags);
		start_transaction(ec);
		spin_unlock_irqrestore(&ec->lock, flags);
	}
	return -ETIME;
}
668
/*
 * Execute transaction @t on @ec.  "Unlocked" refers to ec->mutex and
 * the ACPI global lock, which the caller (acpi_ec_transaction()) holds;
 * ec->lock is taken here so installing/removing ec->curr stays atomic
 * with the reference-count and storm bookkeeping.  Returns 0 on
 * success, -EINVAL when the driver is stopped (flush in progress), or
 * the ec_poll() error.
 */
static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
					struct transaction *t)
{
	unsigned long tmp;
	int ret = 0;

	/* start transaction */
	spin_lock_irqsave(&ec->lock, tmp);
	/* Enable GPE for command processing (IBF=0/OBF=1) */
	if (!acpi_ec_submit_flushable_request(ec)) {
		ret = -EINVAL;
		goto unlock;
	}
	ec_dbg_ref(ec, "Increase command");
	/* following two actions should be kept atomic */
	ec->curr = t;
	ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command));
	start_transaction(ec);
	spin_unlock_irqrestore(&ec->lock, tmp);

	ret = ec_poll(ec);

	spin_lock_irqsave(&ec->lock, tmp);
	/* Leave storm mode if this command triggered storm detection */
	if (t->irq_count == ec_storm_threshold)
		acpi_ec_clear_storm(ec, EC_FLAGS_COMMAND_STORM);
	ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
	ec->curr = NULL;
	/* Disable GPE for command processing (IBF=0/OBF=1) */
	acpi_ec_complete_request(ec);
	ec_dbg_ref(ec, "Decrease command");
unlock:
	spin_unlock_irqrestore(&ec->lock, tmp);
	return ret;
}
703
/*
 * Validate and execute transaction @t: serialize on ec->mutex, take the
 * ACPI global lock when the EC requires it (ec->global_lock), then run
 * the transaction.  Clears the read buffer up front so callers never
 * see stale data.  Returns 0 on success or a negative errno.
 */
static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
{
	int status;
	u32 glk;

	if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
		return -EINVAL;
	if (t->rdata)
		memset(t->rdata, 0, t->rlen);

	mutex_lock(&ec->mutex);
	if (ec->global_lock) {
		status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
		if (ACPI_FAILURE(status)) {
			status = -ENODEV;
			goto unlock;
		}
	}

	status = acpi_ec_transaction_unlocked(ec, t);

	if (ec->global_lock)
		acpi_release_global_lock(glk);
unlock:
	mutex_unlock(&ec->mutex);
	return status;
}
731
732 static int acpi_ec_burst_enable(struct acpi_ec *ec)
733 {
734 u8 d;
735 struct transaction t = {.command = ACPI_EC_BURST_ENABLE,
736 .wdata = NULL, .rdata = &d,
737 .wlen = 0, .rlen = 1};
738
739 return acpi_ec_transaction(ec, &t);
740 }
741
742 static int acpi_ec_burst_disable(struct acpi_ec *ec)
743 {
744 struct transaction t = {.command = ACPI_EC_BURST_DISABLE,
745 .wdata = NULL, .rdata = NULL,
746 .wlen = 0, .rlen = 0};
747
748 return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
749 acpi_ec_transaction(ec, &t) : 0;
750 }
751
752 static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
753 {
754 int result;
755 u8 d;
756 struct transaction t = {.command = ACPI_EC_COMMAND_READ,
757 .wdata = &address, .rdata = &d,
758 .wlen = 1, .rlen = 1};
759
760 result = acpi_ec_transaction(ec, &t);
761 *data = d;
762 return result;
763 }
764
765 static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
766 {
767 u8 wdata[2] = { address, data };
768 struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
769 .wdata = wdata, .rdata = NULL,
770 .wlen = 2, .rlen = 0};
771
772 return acpi_ec_transaction(ec, &t);
773 }
774
775 int ec_read(u8 addr, u8 *val)
776 {
777 int err;
778 u8 temp_data;
779
780 if (!first_ec)
781 return -ENODEV;
782
783 err = acpi_ec_read(first_ec, addr, &temp_data);
784
785 if (!err) {
786 *val = temp_data;
787 return 0;
788 }
789 return err;
790 }
791 EXPORT_SYMBOL(ec_read);
792
793 int ec_write(u8 addr, u8 val)
794 {
795 int err;
796
797 if (!first_ec)
798 return -ENODEV;
799
800 err = acpi_ec_write(first_ec, addr, val);
801
802 return err;
803 }
804 EXPORT_SYMBOL(ec_write);
805
806 int ec_transaction(u8 command,
807 const u8 *wdata, unsigned wdata_len,
808 u8 *rdata, unsigned rdata_len)
809 {
810 struct transaction t = {.command = command,
811 .wdata = wdata, .rdata = rdata,
812 .wlen = wdata_len, .rlen = rdata_len};
813
814 if (!first_ec)
815 return -ENODEV;
816
817 return acpi_ec_transaction(first_ec, &t);
818 }
819 EXPORT_SYMBOL(ec_transaction);
820
821 /* Get the handle to the EC device */
822 acpi_handle ec_get_handle(void)
823 {
824 if (!first_ec)
825 return NULL;
826 return first_ec->handle;
827 }
828 EXPORT_SYMBOL(ec_get_handle);
829
830 /*
831 * Process _Q events that might have accumulated in the EC.
832 * Run with locked ec mutex.
833 */
834 static void acpi_ec_clear(struct acpi_ec *ec)
835 {
836 int i, status;
837 u8 value = 0;
838
839 for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
840 status = acpi_ec_query(ec, &value);
841 if (status || !value)
842 break;
843 }
844
845 if (unlikely(i == ACPI_EC_CLEAR_MAX))
846 pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
847 else
848 pr_info("%d stale EC events cleared\n", i);
849 }
850
/*
 * Mark the driver started and, unless @resuming, take the long-term
 * driver reference that keeps the GPE enabled.  Idempotent:
 * EC_FLAGS_STARTED is test-and-set under ec->lock.
 */
static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
		ec_dbg_drv("Starting EC");
		/* Enable GPE for event processing (SCI_EVT=1) */
		if (!resuming) {
			acpi_ec_submit_request(ec);
			ec_dbg_ref(ec, "Increase driver");
		}
		ec_log_drv("EC started");
	}
	spin_unlock_irqrestore(&ec->lock, flags);
}
867
868 static bool acpi_ec_stopped(struct acpi_ec *ec)
869 {
870 unsigned long flags;
871 bool flushed;
872
873 spin_lock_irqsave(&ec->lock, flags);
874 flushed = acpi_ec_flushed(ec);
875 spin_unlock_irqrestore(&ec->lock, flags);
876 return flushed;
877 }
878
/*
 * Stop the driver: set EC_FLAGS_STOPPED so no new transactions are
 * accepted, wait (with ec->lock dropped) until outstanding requests
 * have flushed, then drop the driver reference unless @suspending.
 * Finally clears both STARTED and STOPPED, returning to the pristine
 * "not started" state.
 */
static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	if (acpi_ec_started(ec)) {
		ec_dbg_drv("Stopping EC");
		set_bit(EC_FLAGS_STOPPED, &ec->flags);
		/* Drop the lock while waiting for the flush to finish */
		spin_unlock_irqrestore(&ec->lock, flags);
		wait_event(ec->wait, acpi_ec_stopped(ec));
		spin_lock_irqsave(&ec->lock, flags);
		/* Disable GPE for event processing (SCI_EVT=1) */
		if (!suspending) {
			acpi_ec_complete_request(ec);
			ec_dbg_ref(ec, "Decrease driver");
		}
		clear_bit(EC_FLAGS_STARTED, &ec->flags);
		clear_bit(EC_FLAGS_STOPPED, &ec->flags);
		ec_log_drv("EC stopped");
	}
	spin_unlock_irqrestore(&ec->lock, flags);
}
901
902 void acpi_ec_block_transactions(void)
903 {
904 struct acpi_ec *ec = first_ec;
905
906 if (!ec)
907 return;
908
909 mutex_lock(&ec->mutex);
910 /* Prevent transactions from being carried out */
911 acpi_ec_stop(ec, true);
912 mutex_unlock(&ec->mutex);
913 }
914
915 void acpi_ec_unblock_transactions(void)
916 {
917 struct acpi_ec *ec = first_ec;
918
919 if (!ec)
920 return;
921
922 /* Allow transactions to be carried out again */
923 acpi_ec_start(ec, true);
924
925 if (EC_FLAGS_CLEAR_ON_RESUME)
926 acpi_ec_clear(ec);
927 }
928
929 void acpi_ec_unblock_transactions_early(void)
930 {
931 /*
932 * Allow transactions to happen again (this function is called from
933 * atomic context during wakeup, so we don't need to acquire the mutex).
934 */
935 if (first_ec)
936 acpi_ec_start(first_ec, true);
937 }
938
939 /* --------------------------------------------------------------------------
940 Event Management
941 -------------------------------------------------------------------------- */
942 static struct acpi_ec_query_handler *
943 acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler)
944 {
945 if (handler)
946 kref_get(&handler->kref);
947 return handler;
948 }
949
950 static struct acpi_ec_query_handler *
951 acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value)
952 {
953 struct acpi_ec_query_handler *handler;
954 bool found = false;
955
956 mutex_lock(&ec->mutex);
957 list_for_each_entry(handler, &ec->list, node) {
958 if (value == handler->query_bit) {
959 found = true;
960 break;
961 }
962 }
963 mutex_unlock(&ec->mutex);
964 return found ? acpi_ec_get_query_handler(handler) : NULL;
965 }
966
967 static void acpi_ec_query_handler_release(struct kref *kref)
968 {
969 struct acpi_ec_query_handler *handler =
970 container_of(kref, struct acpi_ec_query_handler, kref);
971
972 kfree(handler);
973 }
974
/* Drop a handler reference; frees it via acpi_ec_query_handler_release(). */
static void acpi_ec_put_query_handler(struct acpi_ec_query_handler *handler)
{
	kref_put(&handler->kref, acpi_ec_query_handler_release);
}
979
980 int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
981 acpi_handle handle, acpi_ec_query_func func,
982 void *data)
983 {
984 struct acpi_ec_query_handler *handler =
985 kzalloc(sizeof(struct acpi_ec_query_handler), GFP_KERNEL);
986
987 if (!handler)
988 return -ENOMEM;
989
990 handler->query_bit = query_bit;
991 handler->handle = handle;
992 handler->func = func;
993 handler->data = data;
994 mutex_lock(&ec->mutex);
995 kref_init(&handler->kref);
996 list_add(&handler->node, &ec->list);
997 mutex_unlock(&ec->mutex);
998 return 0;
999 }
1000 EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);
1001
1002 static void acpi_ec_remove_query_handlers(struct acpi_ec *ec,
1003 bool remove_all, u8 query_bit)
1004 {
1005 struct acpi_ec_query_handler *handler, *tmp;
1006 LIST_HEAD(free_list);
1007
1008 mutex_lock(&ec->mutex);
1009 list_for_each_entry_safe(handler, tmp, &ec->list, node) {
1010 if (remove_all || query_bit == handler->query_bit) {
1011 list_del_init(&handler->node);
1012 list_add(&handler->node, &free_list);
1013 }
1014 }
1015 mutex_unlock(&ec->mutex);
1016 list_for_each_entry_safe(handler, tmp, &free_list, node)
1017 acpi_ec_put_query_handler(handler);
1018 }
1019
/* Remove the handler(s) registered for @query_bit on @ec. */
void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
{
	acpi_ec_remove_query_handlers(ec, false, query_bit);
}
EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
1025
1026 static struct acpi_ec_query *acpi_ec_create_query(u8 *pval)
1027 {
1028 struct acpi_ec_query *q;
1029 struct transaction *t;
1030
1031 q = kzalloc(sizeof (struct acpi_ec_query), GFP_KERNEL);
1032 if (!q)
1033 return NULL;
1034 INIT_WORK(&q->work, acpi_ec_event_processor);
1035 t = &q->transaction;
1036 t->command = ACPI_EC_COMMAND_QUERY;
1037 t->rdata = pval;
1038 t->rlen = 1;
1039 return q;
1040 }
1041
1042 static void acpi_ec_delete_query(struct acpi_ec_query *q)
1043 {
1044 if (q) {
1045 if (q->handler)
1046 acpi_ec_put_query_handler(q->handler);
1047 kfree(q);
1048 }
1049 }
1050
/*
 * Workqueue callback servicing a single _Qxx event.
 *
 * Invokes the registered callback, or evaluates the handler's AML method
 * when no callback was supplied, then destroys the query (which also drops
 * the handler reference taken by acpi_ec_query()).
 */
static void acpi_ec_event_processor(struct work_struct *work)
{
	struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work);
	struct acpi_ec_query_handler *handler = q->handler;

	ec_dbg_evt("Query(0x%02x) started", handler->query_bit);
	if (handler->func)
		handler->func(handler->data);
	else if (handler->handle)
		acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
	ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
	acpi_ec_delete_query(q);
}
1064
1065 static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
1066 {
1067 u8 value = 0;
1068 int result;
1069 struct acpi_ec_query *q;
1070
1071 q = acpi_ec_create_query(&value);
1072 if (!q)
1073 return -ENOMEM;
1074
1075 /*
1076 * Query the EC to find out which _Qxx method we need to evaluate.
1077 * Note that successful completion of the query causes the ACPI_EC_SCI
1078 * bit to be cleared (and thus clearing the interrupt source).
1079 */
1080 result = acpi_ec_transaction(ec, &q->transaction);
1081 if (!value)
1082 result = -ENODATA;
1083 if (result)
1084 goto err_exit;
1085
1086 q->handler = acpi_ec_get_query_handler_by_value(ec, value);
1087 if (!q->handler) {
1088 result = -ENODATA;
1089 goto err_exit;
1090 }
1091
1092 /*
1093 * It is reported that _Qxx are evaluated in a parallel way on
1094 * Windows:
1095 * https://bugzilla.kernel.org/show_bug.cgi?id=94411
1096 *
1097 * Put this log entry before schedule_work() in order to make
1098 * it appearing before any other log entries occurred during the
1099 * work queue execution.
1100 */
1101 ec_dbg_evt("Query(0x%02x) scheduled", value);
1102 if (!schedule_work(&q->work)) {
1103 ec_dbg_evt("Query(0x%02x) overlapped", value);
1104 result = -EBUSY;
1105 }
1106
1107 err_exit:
1108 if (result)
1109 acpi_ec_delete_query(q);
1110 if (data)
1111 *data = value;
1112 return result;
1113 }
1114
1115 static void acpi_ec_check_event(struct acpi_ec *ec)
1116 {
1117 unsigned long flags;
1118
1119 if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT) {
1120 if (ec_guard(ec)) {
1121 spin_lock_irqsave(&ec->lock, flags);
1122 /*
1123 * Take care of the SCI_EVT unless no one else is
1124 * taking care of it.
1125 */
1126 if (!ec->curr)
1127 advance_transaction(ec);
1128 spin_unlock_irqrestore(&ec->lock, flags);
1129 }
1130 }
1131 }
1132
/*
 * Workqueue callback draining all pending EC queries.
 *
 * ec->nr_pending_queries is read and decremented under ec->lock; the lock
 * is dropped around acpi_ec_query() because the query performs a full EC
 * transaction and may sleep.
 */
static void acpi_ec_event_handler(struct work_struct *work)
{
	unsigned long flags;
	struct acpi_ec *ec = container_of(work, struct acpi_ec, work);

	ec_dbg_evt("Event started");

	spin_lock_irqsave(&ec->lock, flags);
	while (ec->nr_pending_queries) {
		spin_unlock_irqrestore(&ec->lock, flags);
		/* Return value deliberately ignored; we drain regardless. */
		(void)acpi_ec_query(ec, NULL);
		spin_lock_irqsave(&ec->lock, flags);
		ec->nr_pending_queries--;
		/*
		 * Before exit, make sure that this work item can be
		 * scheduled again. There might be QR_EC failures, leaving
		 * EC_FLAGS_QUERY_PENDING uncleared and preventing this work
		 * item from being scheduled again.
		 */
		if (!ec->nr_pending_queries) {
			if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
			    ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY)
				acpi_ec_complete_query(ec);
		}
	}
	spin_unlock_irqrestore(&ec->lock, flags);

	ec_dbg_evt("Event stopped");

	acpi_ec_check_event(ec);
}
1164
/*
 * EC GPE handler: advance the transaction state machine on each EC
 * interrupt.  Always reports the GPE as handled.
 */
static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
			       u32 gpe_number, void *data)
{
	unsigned long flags;
	struct acpi_ec *ec = data;

	spin_lock_irqsave(&ec->lock, flags);
	advance_transaction(ec);
	spin_unlock_irqrestore(&ec->lock, flags);
	return ACPI_INTERRUPT_HANDLED;
}
1176
1177 /* --------------------------------------------------------------------------
1178 * Address Space Management
1179 * -------------------------------------------------------------------------- */
1180
1181 static acpi_status
1182 acpi_ec_space_handler(u32 function, acpi_physical_address address,
1183 u32 bits, u64 *value64,
1184 void *handler_context, void *region_context)
1185 {
1186 struct acpi_ec *ec = handler_context;
1187 int result = 0, i, bytes = bits / 8;
1188 u8 *value = (u8 *)value64;
1189
1190 if ((address > 0xFF) || !value || !handler_context)
1191 return AE_BAD_PARAMETER;
1192
1193 if (function != ACPI_READ && function != ACPI_WRITE)
1194 return AE_BAD_PARAMETER;
1195
1196 if (ec_busy_polling || bits > 8)
1197 acpi_ec_burst_enable(ec);
1198
1199 for (i = 0; i < bytes; ++i, ++address, ++value)
1200 result = (function == ACPI_READ) ?
1201 acpi_ec_read(ec, address, value) :
1202 acpi_ec_write(ec, address, *value);
1203
1204 if (ec_busy_polling || bits > 8)
1205 acpi_ec_burst_disable(ec);
1206
1207 switch (result) {
1208 case -EINVAL:
1209 return AE_BAD_PARAMETER;
1210 case -ENODEV:
1211 return AE_NOT_FOUND;
1212 case -ETIME:
1213 return AE_TIME;
1214 default:
1215 return AE_OK;
1216 }
1217 }
1218
1219 /* --------------------------------------------------------------------------
1220 * Driver Interface
1221 * -------------------------------------------------------------------------- */
1222
1223 static acpi_status
1224 ec_parse_io_ports(struct acpi_resource *resource, void *context);
1225
1226 static struct acpi_ec *make_acpi_ec(void)
1227 {
1228 struct acpi_ec *ec = kzalloc(sizeof(struct acpi_ec), GFP_KERNEL);
1229
1230 if (!ec)
1231 return NULL;
1232 ec->flags = 1 << EC_FLAGS_QUERY_PENDING;
1233 mutex_init(&ec->mutex);
1234 init_waitqueue_head(&ec->wait);
1235 INIT_LIST_HEAD(&ec->list);
1236 spin_lock_init(&ec->lock);
1237 INIT_WORK(&ec->work, acpi_ec_event_handler);
1238 ec->timestamp = jiffies;
1239 return ec;
1240 }
1241
1242 static acpi_status
1243 acpi_ec_register_query_methods(acpi_handle handle, u32 level,
1244 void *context, void **return_value)
1245 {
1246 char node_name[5];
1247 struct acpi_buffer buffer = { sizeof(node_name), node_name };
1248 struct acpi_ec *ec = context;
1249 int value = 0;
1250 acpi_status status;
1251
1252 status = acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);
1253
1254 if (ACPI_SUCCESS(status) && sscanf(node_name, "_Q%x", &value) == 1)
1255 acpi_ec_add_query_handler(ec, value, handle, NULL, NULL);
1256 return AE_OK;
1257 }
1258
1259 static acpi_status
1260 ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
1261 {
1262 acpi_status status;
1263 unsigned long long tmp = 0;
1264 struct acpi_ec *ec = context;
1265
1266 /* clear addr values, ec_parse_io_ports depend on it */
1267 ec->command_addr = ec->data_addr = 0;
1268
1269 status = acpi_walk_resources(handle, METHOD_NAME__CRS,
1270 ec_parse_io_ports, ec);
1271 if (ACPI_FAILURE(status))
1272 return status;
1273
1274 /* Get GPE bit assignment (EC events). */
1275 /* TODO: Add support for _GPE returning a package */
1276 status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
1277 if (ACPI_FAILURE(status))
1278 return status;
1279 ec->gpe = tmp;
1280 /* Use the global lock for all EC transactions? */
1281 tmp = 0;
1282 acpi_evaluate_integer(handle, "_GLK", NULL, &tmp);
1283 ec->global_lock = tmp;
1284 ec->handle = handle;
1285 return AE_CTRL_TERMINATE;
1286 }
1287
/*
 * Install the EC's address-space and GPE handlers.
 *
 * Guarded by the EC_FLAGS_*_HANDLER_INSTALLED bits so repeated calls
 * (boot EC first, then the enumerated device) are harmless.  A missing
 * GPE handler is not fatal because EC events can still be polled.
 */
static int ec_install_handlers(struct acpi_ec *ec)
{
	acpi_status status;

	acpi_ec_start(ec, false);

	if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
		status = acpi_install_address_space_handler(ec->handle,
							ACPI_ADR_SPACE_EC,
							&acpi_ec_space_handler,
							NULL, ec);
		if (ACPI_FAILURE(status)) {
			if (status == AE_NOT_FOUND) {
				/*
				 * Maybe the OS fails in evaluating the _REG
				 * object.  The AE_NOT_FOUND error is
				 * ignored and the OS continues to
				 * initialize the EC.
				 */
				pr_err("Fail in evaluating the _REG object"
					" of EC device. Broken bios is suspected.\n");
			} else {
				acpi_ec_stop(ec, false);
				return -ENODEV;
			}
		}
		set_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
	}

	if (!test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) {
		status = acpi_install_gpe_raw_handler(NULL, ec->gpe,
					ACPI_GPE_EDGE_TRIGGERED,
					&acpi_ec_gpe_handler, ec);
		/* This is not fatal as we can poll EC events */
		if (ACPI_SUCCESS(status)) {
			set_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags);
			/* Enable the GPE only if the EC is already started. */
			if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
			    ec->reference_count >= 1)
				acpi_ec_enable_gpe(ec, true);
		}
	}

	return 0;
}
1332
1333 static void ec_remove_handlers(struct acpi_ec *ec)
1334 {
1335 acpi_ec_stop(ec, false);
1336
1337 if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
1338 if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
1339 ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
1340 pr_err("failed to remove space handler\n");
1341 clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
1342 }
1343
1344 if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) {
1345 if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
1346 &acpi_ec_gpe_handler)))
1347 pr_err("failed to remove gpe handler\n");
1348 clear_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags);
1349 }
1350 }
1351
/*
 * Driver .add callback: bind the ACPI EC device.
 *
 * Reuses the boot EC (from ECDT or early DSDT scan) when it matches this
 * device, otherwise allocates a fresh one.  After parsing resources it
 * registers all _Qxx methods, claims the I/O ports, installs the handlers
 * and finally allows query processing.
 */
static int acpi_ec_add(struct acpi_device *device)
{
	struct acpi_ec *ec = NULL;
	int ret;

	strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_EC_CLASS);

	/* Check for boot EC */
	if (boot_ec &&
	    (boot_ec->handle == device->handle ||
	     boot_ec->handle == ACPI_ROOT_OBJECT)) {
		ec = boot_ec;
		boot_ec = NULL;		/* ownership moves to this device */
	} else {
		ec = make_acpi_ec();
		if (!ec)
			return -ENOMEM;
	}
	if (ec_parse_device(device->handle, 0, ec, NULL) !=
	    AE_CTRL_TERMINATE) {
		/*
		 * NOTE(review): if this was the boot EC it is freed here and
		 * lost for good (boot_ec is already NULL) — confirm intended.
		 */
		kfree(ec);
		return -EINVAL;
	}

	/* Find and register all query methods */
	acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
			    acpi_ec_register_query_methods, NULL, ec, NULL);

	if (!first_ec)
		first_ec = ec;
	device->driver_data = ec;

	/* I/O region conflicts are reported via WARN but are not fatal. */
	ret = !!request_region(ec->data_addr, 1, "EC data");
	WARN(!ret, "Could not request EC data io port 0x%lx", ec->data_addr);
	ret = !!request_region(ec->command_addr, 1, "EC cmd");
	WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);

	pr_info("GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
		ec->gpe, ec->command_addr, ec->data_addr);

	ret = ec_install_handlers(ec);

	/* Reprobe devices depending on the EC */
	acpi_walk_dep_device_list(ec->handle);

	/* EC is fully operational, allow queries */
	clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);

	/* Clear stale _Q events if hardware might require that */
	if (EC_FLAGS_CLEAR_ON_RESUME)
		acpi_ec_clear(ec);
	return ret;
}
1406
/*
 * Driver .remove callback: undo everything acpi_ec_add() set up — remove
 * handlers, drop all registered query handlers, release the claimed I/O
 * ports and free the EC structure.
 */
static int acpi_ec_remove(struct acpi_device *device)
{
	struct acpi_ec *ec;

	if (!device)
		return -EINVAL;

	ec = acpi_driver_data(device);
	ec_remove_handlers(ec);
	acpi_ec_remove_query_handlers(ec, true, 0);
	release_region(ec->data_addr, 1);
	release_region(ec->command_addr, 1);
	device->driver_data = NULL;
	/* If this was the default EC, forget it. */
	if (ec == first_ec)
		first_ec = NULL;
	kfree(ec);
	return 0;
}
1425
1426 static acpi_status
1427 ec_parse_io_ports(struct acpi_resource *resource, void *context)
1428 {
1429 struct acpi_ec *ec = context;
1430
1431 if (resource->type != ACPI_RESOURCE_TYPE_IO)
1432 return AE_OK;
1433
1434 /*
1435 * The first address region returned is the data port, and
1436 * the second address region returned is the status/command
1437 * port.
1438 */
1439 if (ec->data_addr == 0)
1440 ec->data_addr = resource->data.io.minimum;
1441 else if (ec->command_addr == 0)
1442 ec->command_addr = resource->data.io.minimum;
1443 else
1444 return AE_CTRL_TERMINATE;
1445
1446 return AE_OK;
1447 }
1448
1449 int __init acpi_boot_ec_enable(void)
1450 {
1451 if (!boot_ec)
1452 return 0;
1453 if (!ec_install_handlers(boot_ec)) {
1454 first_ec = boot_ec;
1455 return 0;
1456 }
1457 return -EFAULT;
1458 }
1459
/* PNP0C09 is the ACPI hardware ID of an embedded controller device. */
static const struct acpi_device_id ec_device_ids[] = {
	{"PNP0C09", 0},
	{"", 0},
};
1464
/* Some BIOS do not survive early DSDT scan, skip it */
static int ec_skip_dsdt_scan(const struct dmi_system_id *id)
{
	/* DMI callback: sets the quirk flag checked in acpi_ec_ecdt_probe(). */
	EC_FLAGS_SKIP_DSDT_SCAN = 1;
	return 0;
}
1471
/* ASUStek often supplies us with broken ECDT, validate it */
static int ec_validate_ecdt(const struct dmi_system_id *id)
{
	/* DMI callback: forces the ECDT-vs-DSDT cross-check in acpi_ec_ecdt_probe(). */
	EC_FLAGS_VALIDATE_ECDT = 1;
	return 0;
}
1478
#if 0
/*
 * Some EC firmware variations refuse to respond QR_EC when SCI_EVT is not
 * set, for which case, we complete the QR_EC without issuing it to the
 * firmware.
 * https://bugzilla.kernel.org/show_bug.cgi?id=82611
 * https://bugzilla.kernel.org/show_bug.cgi?id=97381
 */
/* Currently compiled out: no ec_dmi_table entry references this callback. */
static int ec_flag_query_handshake(const struct dmi_system_id *id)
{
	pr_debug("Detected the EC firmware requiring QR_EC issued when SCI_EVT set\n");
	EC_FLAGS_QUERY_HANDSHAKE = 1;
	return 0;
}
#endif
1494
1495 /*
1496 * On some hardware it is necessary to clear events accumulated by the EC during
1497 * sleep. These ECs stop reporting GPEs until they are manually polled, if too
1498 * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
1499 *
1500 * https://bugzilla.kernel.org/show_bug.cgi?id=44161
1501 *
1502 * Ideally, the EC should also be instructed NOT to accumulate events during
1503 * sleep (which Windows seems to do somehow), but the interface to control this
1504 * behaviour is not known at this time.
1505 *
1506 * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
1507 * however it is very likely that other Samsung models are affected.
1508 *
1509 * On systems which don't accumulate _Q events during sleep, this extra check
1510 * should be harmless.
1511 */
/* DMI callback enabling the clear-stale-events-on-resume quirk (Samsung). */
static int ec_clear_on_resume(const struct dmi_system_id *id)
{
	pr_debug("Detected system needing EC poll on resume.\n");
	EC_FLAGS_CLEAR_ON_RESUME = 1;
	/* On these machines SCI_EVT is assumed to clear on EC_SC accesses. */
	ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
	return 0;
}
1519
/*
 * DMI quirk table, scanned once from acpi_ec_ecdt_probe().  Entries either
 * skip the early DSDT EC scan, cross-check a suspect ECDT against the DSDT,
 * or enable clearing of accumulated _Q events on resume.
 */
static struct dmi_system_id ec_dmi_table[] __initdata = {
	{
	ec_skip_dsdt_scan, "Compal JFL92", {
	DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
	DMI_MATCH(DMI_BOARD_NAME, "JFL92") }, NULL},
	{
	ec_validate_ecdt, "MSI MS-171F", {
	DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"),
	DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),}, NULL},
	{
	ec_validate_ecdt, "ASUS hardware", {
	DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
	{
	ec_validate_ecdt, "ASUS hardware", {
	DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL},
	{
	ec_skip_dsdt_scan, "HP Folio 13", {
	DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
	DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13"),}, NULL},
	{
	ec_validate_ecdt, "ASUS hardware", {
	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
	DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
	{
	ec_clear_on_resume, "Samsung hardware", {
	DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
	{},
};
1548
/*
 * Probe for an EC usable before the ACPI namespace is fully scanned.
 *
 * Preference order:
 *  1. The ECDT table, if present (unless DMI says it must be validated
 *     against the DSDT first).
 *  2. An early DSDT scan for PNP0C09 (unless DMI says to skip it).
 * On success boot_ec/first_ec point at the configured EC.
 */
int __init acpi_ec_ecdt_probe(void)
{
	acpi_status status;
	struct acpi_ec *saved_ec = NULL;
	struct acpi_table_ecdt *ecdt_ptr;

	boot_ec = make_acpi_ec();
	if (!boot_ec)
		return -ENOMEM;
	/*
	 * Generate a boot ec context
	 */
	dmi_check_system(ec_dmi_table);
	status = acpi_get_table(ACPI_SIG_ECDT, 1,
				(struct acpi_table_header **)&ecdt_ptr);
	if (ACPI_SUCCESS(status)) {
		pr_info("EC description table is found, configuring boot EC\n");
		boot_ec->command_addr = ecdt_ptr->control.address;
		boot_ec->data_addr = ecdt_ptr->data.address;
		boot_ec->gpe = ecdt_ptr->gpe;
		/* Fall back to the root object if the named device is absent. */
		boot_ec->handle = ACPI_ROOT_OBJECT;
		acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id,
				&boot_ec->handle);
		/* Don't trust ECDT, which comes from ASUSTek */
		if (!EC_FLAGS_VALIDATE_ECDT)
			goto install;
		saved_ec = kmemdup(boot_ec, sizeof(struct acpi_ec), GFP_KERNEL);
		if (!saved_ec)
			/* NOTE(review): boot_ec leaks here — consider goto error. */
			return -ENOMEM;
		/* fall through */
	}

	if (EC_FLAGS_SKIP_DSDT_SCAN) {
		/* NOTE(review): boot_ec is kept allocated here — confirm intended. */
		kfree(saved_ec);
		return -ENODEV;
	}

	/* This workaround is needed only on some broken machines,
	 * which require early EC, but fail to provide ECDT */
	pr_debug("Look up EC in DSDT\n");
	status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device,
				  boot_ec, NULL);
	/* Check that acpi_get_devices actually find something */
	if (ACPI_FAILURE(status) || !boot_ec->handle)
		goto error;
	if (saved_ec) {
		/* try to find good ECDT from ASUSTek */
		if (saved_ec->command_addr != boot_ec->command_addr ||
		    saved_ec->data_addr != boot_ec->data_addr ||
		    saved_ec->gpe != boot_ec->gpe ||
		    saved_ec->handle != boot_ec->handle)
			pr_info("ASUSTek keeps feeding us with broken "
				"ECDT tables, which are very hard to workaround. "
				"Trying to use DSDT EC info instead. Please send "
				"output of acpidump to linux-acpi@vger.kernel.org\n");
		kfree(saved_ec);
		saved_ec = NULL;
	} else {
		/* We really need to limit this workaround, the only ASUS,
		 * which needs it, has fake EC._INI method, so use it as flag.
		 * Keep boot_ec struct as it will be needed soon.
		 */
		if (!dmi_name_in_vendors("ASUS") ||
		    !acpi_has_method(boot_ec->handle, "_INI"))
			return -ENODEV;
	}
install:
	if (!ec_install_handlers(boot_ec)) {
		first_ec = boot_ec;
		return 0;
	}
error:
	kfree(boot_ec);
	kfree(saved_ec);
	boot_ec = NULL;
	return -ENODEV;
}
1626
1627 static int param_set_event_clearing(const char *val, struct kernel_param *kp)
1628 {
1629 int result = 0;
1630
1631 if (!strncmp(val, "status", sizeof("status") - 1)) {
1632 ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
1633 pr_info("Assuming SCI_EVT clearing on EC_SC accesses\n");
1634 } else if (!strncmp(val, "query", sizeof("query") - 1)) {
1635 ec_event_clearing = ACPI_EC_EVT_TIMING_QUERY;
1636 pr_info("Assuming SCI_EVT clearing on QR_EC writes\n");
1637 } else if (!strncmp(val, "event", sizeof("event") - 1)) {
1638 ec_event_clearing = ACPI_EC_EVT_TIMING_EVENT;
1639 pr_info("Assuming SCI_EVT clearing on event reads\n");
1640 } else
1641 result = -EINVAL;
1642 return result;
1643 }
1644
1645 static int param_get_event_clearing(char *buffer, struct kernel_param *kp)
1646 {
1647 switch (ec_event_clearing) {
1648 case ACPI_EC_EVT_TIMING_STATUS:
1649 return sprintf(buffer, "status");
1650 case ACPI_EC_EVT_TIMING_QUERY:
1651 return sprintf(buffer, "query");
1652 case ACPI_EC_EVT_TIMING_EVENT:
1653 return sprintf(buffer, "event");
1654 default:
1655 return sprintf(buffer, "invalid");
1656 }
1657 return 0;
1658 }
1659
/* Module parameter "ec_event_clearing": selects the assumed SCI_EVT clearing timing. */
module_param_call(ec_event_clearing, param_set_event_clearing, param_get_event_clearing,
		  NULL, 0644);
MODULE_PARM_DESC(ec_event_clearing, "Assumed SCI_EVT clearing timing");
1663
/* ACPI bus driver binding PNP0C09 devices to this EC implementation. */
static struct acpi_driver acpi_ec_driver = {
	.name = "ec",
	.class = ACPI_EC_CLASS,
	.ids = ec_device_ids,
	.ops = {
		.add = acpi_ec_add,
		.remove = acpi_ec_remove,
		},
};
1673
1674 int __init acpi_ec_init(void)
1675 {
1676 int result = 0;
1677
1678 /* Now register the driver for the EC */
1679 result = acpi_bus_register_driver(&acpi_ec_driver);
1680 if (result < 0)
1681 return -ENODEV;
1682
1683 return result;
1684 }
1685
/* EC driver currently not unloadable, so no module_exit() path is compiled in. */
#if 0
static void __exit acpi_ec_exit(void)
{

	acpi_bus_unregister_driver(&acpi_ec_driver);
}
#endif	/* 0 */
This page took 0.093113 seconds and 6 git commands to generate.