2 * linux/arch/arm/plat-omap/dmtimer.c
4 * OMAP Dual-Mode Timers
6 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
7 * Tarun Kanti DebBarma <tarun.kanti@ti.com>
8 * Thara Gopinath <thara@ti.com>
10 * dmtimer adaptation to platform_driver.
12 * Copyright (C) 2005 Nokia Corporation
13 * OMAP2 support by Juha Yrjola
14 * API improvements and OMAP2 clock framework support by Timo Teras
16 * Copyright (C) 2009 Texas Instruments
17 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
19 * This program is free software; you can redistribute it and/or modify it
20 * under the terms of the GNU General Public License as published by the
21 * Free Software Foundation; either version 2 of the License, or (at your
22 * option) any later version.
24 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
25 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
26 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
27 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 * You should have received a copy of the GNU General Public License along
34 * with this program; if not, write to the Free Software Foundation, Inc.,
35 * 675 Mass Ave, Cambridge, MA 02139, USA.
38 #include <linux/module.h>
40 #include <linux/slab.h>
41 #include <linux/err.h>
42 #include <linux/pm_runtime.h>
44 #include <plat/dmtimer.h>
45 #include <plat/omap-pm.h>
47 #include <mach/hardware.h>
/* Bitmask of timer IDs reserved at early boot for use as system timers. */
static u32 omap_reserved_systimers;
/* All registered dmtimer devices; guarded by dm_timer_lock. */
static LIST_HEAD(omap_timer_list);
/* Protects omap_timer_list and per-timer reservation state. */
static DEFINE_SPINLOCK(dm_timer_lock);
54 * omap_dm_timer_read_reg - read timer registers in posted and non-posted mode
55 * @timer: timer pointer over which read operation to perform
56 * @reg: lowest byte holds the register offset
58 * The posted mode bit is encoded in reg. Note that in posted mode write
59 * pending bit must be checked. Otherwise a read of a non completed write
60 * will produce an error.
62 static inline u32
omap_dm_timer_read_reg(struct omap_dm_timer
*timer
, u32 reg
)
64 WARN_ON((reg
& 0xff) < _OMAP_TIMER_WAKEUP_EN_OFFSET
);
65 return __omap_dm_timer_read(timer
, reg
, timer
->posted
);
69 * omap_dm_timer_write_reg - write timer registers in posted and non-posted mode
70 * @timer: timer pointer over which write operation is to perform
71 * @reg: lowest byte holds the register offset
72 * @value: data to write into the register
74 * The posted mode bit is encoded in reg. Note that in posted mode the write
75 * pending bit must be checked. Otherwise a write on a register which has a
76 * pending write will be lost.
78 static void omap_dm_timer_write_reg(struct omap_dm_timer
*timer
, u32 reg
,
81 WARN_ON((reg
& 0xff) < _OMAP_TIMER_WAKEUP_EN_OFFSET
);
82 __omap_dm_timer_write(timer
, reg
, value
, timer
->posted
);
85 static void omap_timer_restore_context(struct omap_dm_timer
*timer
)
87 if (timer
->revision
== 1)
88 __raw_writel(timer
->context
.tistat
, timer
->sys_stat
);
90 __raw_writel(timer
->context
.tisr
, timer
->irq_stat
);
91 omap_dm_timer_write_reg(timer
, OMAP_TIMER_WAKEUP_EN_REG
,
93 omap_dm_timer_write_reg(timer
, OMAP_TIMER_COUNTER_REG
,
95 omap_dm_timer_write_reg(timer
, OMAP_TIMER_LOAD_REG
,
97 omap_dm_timer_write_reg(timer
, OMAP_TIMER_MATCH_REG
,
99 omap_dm_timer_write_reg(timer
, OMAP_TIMER_IF_CTRL_REG
,
100 timer
->context
.tsicr
);
101 __raw_writel(timer
->context
.tier
, timer
->irq_ena
);
102 omap_dm_timer_write_reg(timer
, OMAP_TIMER_CTRL_REG
,
103 timer
->context
.tclr
);
106 static void omap_dm_timer_wait_for_reset(struct omap_dm_timer
*timer
)
110 if (!timer
->sys_stat
)
114 while (!(__raw_readl(timer
->sys_stat
) & 1)) {
117 printk(KERN_ERR
"Timer failed to reset\n");
123 static void omap_dm_timer_reset(struct omap_dm_timer
*timer
)
125 omap_dm_timer_enable(timer
);
126 if (timer
->pdev
->id
!= 1) {
127 omap_dm_timer_write_reg(timer
, OMAP_TIMER_IF_CTRL_REG
, 0x06);
128 omap_dm_timer_wait_for_reset(timer
);
131 __omap_dm_timer_reset(timer
, 0, 0);
132 omap_dm_timer_disable(timer
);
136 int omap_dm_timer_prepare(struct omap_dm_timer
*timer
)
140 timer
->fclk
= clk_get(&timer
->pdev
->dev
, "fck");
141 if (WARN_ON_ONCE(IS_ERR_OR_NULL(timer
->fclk
))) {
143 dev_err(&timer
->pdev
->dev
, ": No fclk handle.\n");
147 if (timer
->capability
& OMAP_TIMER_NEEDS_RESET
)
148 omap_dm_timer_reset(timer
);
150 ret
= omap_dm_timer_set_source(timer
, OMAP_TIMER_SRC_32_KHZ
);
156 static inline u32
omap_dm_timer_reserved_systimer(int id
)
158 return (omap_reserved_systimers
& (1 << (id
- 1))) ? 1 : 0;
161 int omap_dm_timer_reserve_systimer(int id
)
163 if (omap_dm_timer_reserved_systimer(id
))
166 omap_reserved_systimers
|= (1 << (id
- 1));
171 struct omap_dm_timer
*omap_dm_timer_request(void)
173 struct omap_dm_timer
*timer
= NULL
, *t
;
177 spin_lock_irqsave(&dm_timer_lock
, flags
);
178 list_for_each_entry(t
, &omap_timer_list
, node
) {
188 ret
= omap_dm_timer_prepare(timer
);
194 spin_unlock_irqrestore(&dm_timer_lock
, flags
);
197 pr_debug("%s: timer request failed!\n", __func__
);
201 EXPORT_SYMBOL_GPL(omap_dm_timer_request
);
203 struct omap_dm_timer
*omap_dm_timer_request_specific(int id
)
205 struct omap_dm_timer
*timer
= NULL
, *t
;
209 spin_lock_irqsave(&dm_timer_lock
, flags
);
210 list_for_each_entry(t
, &omap_timer_list
, node
) {
211 if (t
->pdev
->id
== id
&& !t
->reserved
) {
219 ret
= omap_dm_timer_prepare(timer
);
225 spin_unlock_irqrestore(&dm_timer_lock
, flags
);
228 pr_debug("%s: timer%d request failed!\n", __func__
, id
);
232 EXPORT_SYMBOL_GPL(omap_dm_timer_request_specific
);
234 int omap_dm_timer_free(struct omap_dm_timer
*timer
)
236 if (unlikely(!timer
))
239 clk_put(timer
->fclk
);
241 WARN_ON(!timer
->reserved
);
245 EXPORT_SYMBOL_GPL(omap_dm_timer_free
);
247 void omap_dm_timer_enable(struct omap_dm_timer
*timer
)
249 pm_runtime_get_sync(&timer
->pdev
->dev
);
251 EXPORT_SYMBOL_GPL(omap_dm_timer_enable
);
253 void omap_dm_timer_disable(struct omap_dm_timer
*timer
)
255 pm_runtime_put(&timer
->pdev
->dev
);
257 EXPORT_SYMBOL_GPL(omap_dm_timer_disable
);
259 int omap_dm_timer_get_irq(struct omap_dm_timer
*timer
)
265 EXPORT_SYMBOL_GPL(omap_dm_timer_get_irq
);
267 #if defined(CONFIG_ARCH_OMAP1)
270 * omap_dm_timer_modify_idlect_mask - Check if any running timers use ARMXOR
271 * @inputmask: current value of idlect mask
273 __u32
omap_dm_timer_modify_idlect_mask(__u32 inputmask
)
276 struct omap_dm_timer
*timer
= NULL
;
279 /* If ARMXOR cannot be idled this function call is unnecessary */
280 if (!(inputmask
& (1 << 1)))
283 /* If any active timer is using ARMXOR return modified mask */
284 spin_lock_irqsave(&dm_timer_lock
, flags
);
285 list_for_each_entry(timer
, &omap_timer_list
, node
) {
288 l
= omap_dm_timer_read_reg(timer
, OMAP_TIMER_CTRL_REG
);
289 if (l
& OMAP_TIMER_CTRL_ST
) {
290 if (((omap_readl(MOD_CONF_CTRL_1
) >> (i
* 2)) & 0x03) == 0)
291 inputmask
&= ~(1 << 1);
293 inputmask
&= ~(1 << 2);
297 spin_unlock_irqrestore(&dm_timer_lock
, flags
);
301 EXPORT_SYMBOL_GPL(omap_dm_timer_modify_idlect_mask
);
305 struct clk
*omap_dm_timer_get_fclk(struct omap_dm_timer
*timer
)
311 EXPORT_SYMBOL_GPL(omap_dm_timer_get_fclk
);
313 __u32
omap_dm_timer_modify_idlect_mask(__u32 inputmask
)
319 EXPORT_SYMBOL_GPL(omap_dm_timer_modify_idlect_mask
);
323 int omap_dm_timer_trigger(struct omap_dm_timer
*timer
)
325 if (unlikely(!timer
|| pm_runtime_suspended(&timer
->pdev
->dev
))) {
326 pr_err("%s: timer not available or enabled.\n", __func__
);
330 omap_dm_timer_write_reg(timer
, OMAP_TIMER_TRIGGER_REG
, 0);
333 EXPORT_SYMBOL_GPL(omap_dm_timer_trigger
);
335 int omap_dm_timer_start(struct omap_dm_timer
*timer
)
339 if (unlikely(!timer
))
342 omap_dm_timer_enable(timer
);
344 if (!(timer
->capability
& OMAP_TIMER_ALWON
)) {
345 if (omap_pm_get_dev_context_loss_count(&timer
->pdev
->dev
) !=
346 timer
->ctx_loss_count
)
347 omap_timer_restore_context(timer
);
350 l
= omap_dm_timer_read_reg(timer
, OMAP_TIMER_CTRL_REG
);
351 if (!(l
& OMAP_TIMER_CTRL_ST
)) {
352 l
|= OMAP_TIMER_CTRL_ST
;
353 omap_dm_timer_write_reg(timer
, OMAP_TIMER_CTRL_REG
, l
);
356 /* Save the context */
357 timer
->context
.tclr
= l
;
360 EXPORT_SYMBOL_GPL(omap_dm_timer_start
);
362 int omap_dm_timer_stop(struct omap_dm_timer
*timer
)
364 unsigned long rate
= 0;
366 if (unlikely(!timer
))
369 if (!(timer
->capability
& OMAP_TIMER_NEEDS_RESET
))
370 rate
= clk_get_rate(timer
->fclk
);
372 __omap_dm_timer_stop(timer
, timer
->posted
, rate
);
374 if (!(timer
->capability
& OMAP_TIMER_ALWON
))
375 timer
->ctx_loss_count
=
376 omap_pm_get_dev_context_loss_count(&timer
->pdev
->dev
);
379 * Since the register values are computed and written within
380 * __omap_dm_timer_stop, we need to use read to retrieve the
383 timer
->context
.tclr
=
384 omap_dm_timer_read_reg(timer
, OMAP_TIMER_CTRL_REG
);
385 timer
->context
.tisr
= __raw_readl(timer
->irq_stat
);
386 omap_dm_timer_disable(timer
);
389 EXPORT_SYMBOL_GPL(omap_dm_timer_stop
);
391 int omap_dm_timer_set_source(struct omap_dm_timer
*timer
, int source
)
394 struct dmtimer_platform_data
*pdata
;
396 if (unlikely(!timer
))
399 pdata
= timer
->pdev
->dev
.platform_data
;
401 if (source
< 0 || source
>= 3)
404 ret
= pdata
->set_timer_src(timer
->pdev
, source
);
408 EXPORT_SYMBOL_GPL(omap_dm_timer_set_source
);
410 int omap_dm_timer_set_load(struct omap_dm_timer
*timer
, int autoreload
,
415 if (unlikely(!timer
))
418 omap_dm_timer_enable(timer
);
419 l
= omap_dm_timer_read_reg(timer
, OMAP_TIMER_CTRL_REG
);
421 l
|= OMAP_TIMER_CTRL_AR
;
423 l
&= ~OMAP_TIMER_CTRL_AR
;
424 omap_dm_timer_write_reg(timer
, OMAP_TIMER_CTRL_REG
, l
);
425 omap_dm_timer_write_reg(timer
, OMAP_TIMER_LOAD_REG
, load
);
427 omap_dm_timer_write_reg(timer
, OMAP_TIMER_TRIGGER_REG
, 0);
428 /* Save the context */
429 timer
->context
.tclr
= l
;
430 timer
->context
.tldr
= load
;
431 omap_dm_timer_disable(timer
);
434 EXPORT_SYMBOL_GPL(omap_dm_timer_set_load
);
436 /* Optimized set_load which removes costly spin wait in timer_start */
437 int omap_dm_timer_set_load_start(struct omap_dm_timer
*timer
, int autoreload
,
442 if (unlikely(!timer
))
445 omap_dm_timer_enable(timer
);
447 if (!(timer
->capability
& OMAP_TIMER_ALWON
)) {
448 if (omap_pm_get_dev_context_loss_count(&timer
->pdev
->dev
) !=
449 timer
->ctx_loss_count
)
450 omap_timer_restore_context(timer
);
453 l
= omap_dm_timer_read_reg(timer
, OMAP_TIMER_CTRL_REG
);
455 l
|= OMAP_TIMER_CTRL_AR
;
456 omap_dm_timer_write_reg(timer
, OMAP_TIMER_LOAD_REG
, load
);
458 l
&= ~OMAP_TIMER_CTRL_AR
;
460 l
|= OMAP_TIMER_CTRL_ST
;
462 __omap_dm_timer_load_start(timer
, l
, load
, timer
->posted
);
464 /* Save the context */
465 timer
->context
.tclr
= l
;
466 timer
->context
.tldr
= load
;
467 timer
->context
.tcrr
= load
;
470 EXPORT_SYMBOL_GPL(omap_dm_timer_set_load_start
);
472 int omap_dm_timer_set_match(struct omap_dm_timer
*timer
, int enable
,
477 if (unlikely(!timer
))
480 omap_dm_timer_enable(timer
);
481 l
= omap_dm_timer_read_reg(timer
, OMAP_TIMER_CTRL_REG
);
483 l
|= OMAP_TIMER_CTRL_CE
;
485 l
&= ~OMAP_TIMER_CTRL_CE
;
486 omap_dm_timer_write_reg(timer
, OMAP_TIMER_CTRL_REG
, l
);
487 omap_dm_timer_write_reg(timer
, OMAP_TIMER_MATCH_REG
, match
);
489 /* Save the context */
490 timer
->context
.tclr
= l
;
491 timer
->context
.tmar
= match
;
492 omap_dm_timer_disable(timer
);
495 EXPORT_SYMBOL_GPL(omap_dm_timer_set_match
);
497 int omap_dm_timer_set_pwm(struct omap_dm_timer
*timer
, int def_on
,
498 int toggle
, int trigger
)
502 if (unlikely(!timer
))
505 omap_dm_timer_enable(timer
);
506 l
= omap_dm_timer_read_reg(timer
, OMAP_TIMER_CTRL_REG
);
507 l
&= ~(OMAP_TIMER_CTRL_GPOCFG
| OMAP_TIMER_CTRL_SCPWM
|
508 OMAP_TIMER_CTRL_PT
| (0x03 << 10));
510 l
|= OMAP_TIMER_CTRL_SCPWM
;
512 l
|= OMAP_TIMER_CTRL_PT
;
514 omap_dm_timer_write_reg(timer
, OMAP_TIMER_CTRL_REG
, l
);
516 /* Save the context */
517 timer
->context
.tclr
= l
;
518 omap_dm_timer_disable(timer
);
521 EXPORT_SYMBOL_GPL(omap_dm_timer_set_pwm
);
523 int omap_dm_timer_set_prescaler(struct omap_dm_timer
*timer
, int prescaler
)
527 if (unlikely(!timer
))
530 omap_dm_timer_enable(timer
);
531 l
= omap_dm_timer_read_reg(timer
, OMAP_TIMER_CTRL_REG
);
532 l
&= ~(OMAP_TIMER_CTRL_PRE
| (0x07 << 2));
533 if (prescaler
>= 0x00 && prescaler
<= 0x07) {
534 l
|= OMAP_TIMER_CTRL_PRE
;
537 omap_dm_timer_write_reg(timer
, OMAP_TIMER_CTRL_REG
, l
);
539 /* Save the context */
540 timer
->context
.tclr
= l
;
541 omap_dm_timer_disable(timer
);
544 EXPORT_SYMBOL_GPL(omap_dm_timer_set_prescaler
);
546 int omap_dm_timer_set_int_enable(struct omap_dm_timer
*timer
,
549 if (unlikely(!timer
))
552 omap_dm_timer_enable(timer
);
553 __omap_dm_timer_int_enable(timer
, value
);
555 /* Save the context */
556 timer
->context
.tier
= value
;
557 timer
->context
.twer
= value
;
558 omap_dm_timer_disable(timer
);
561 EXPORT_SYMBOL_GPL(omap_dm_timer_set_int_enable
);
563 unsigned int omap_dm_timer_read_status(struct omap_dm_timer
*timer
)
567 if (unlikely(!timer
|| pm_runtime_suspended(&timer
->pdev
->dev
))) {
568 pr_err("%s: timer not available or enabled.\n", __func__
);
572 l
= __raw_readl(timer
->irq_stat
);
576 EXPORT_SYMBOL_GPL(omap_dm_timer_read_status
);
578 int omap_dm_timer_write_status(struct omap_dm_timer
*timer
, unsigned int value
)
580 if (unlikely(!timer
|| pm_runtime_suspended(&timer
->pdev
->dev
)))
583 __omap_dm_timer_write_status(timer
, value
);
584 /* Save the context */
585 timer
->context
.tisr
= value
;
588 EXPORT_SYMBOL_GPL(omap_dm_timer_write_status
);
590 unsigned int omap_dm_timer_read_counter(struct omap_dm_timer
*timer
)
592 if (unlikely(!timer
|| pm_runtime_suspended(&timer
->pdev
->dev
))) {
593 pr_err("%s: timer not iavailable or enabled.\n", __func__
);
597 return __omap_dm_timer_read_counter(timer
, timer
->posted
);
599 EXPORT_SYMBOL_GPL(omap_dm_timer_read_counter
);
601 int omap_dm_timer_write_counter(struct omap_dm_timer
*timer
, unsigned int value
)
603 if (unlikely(!timer
|| pm_runtime_suspended(&timer
->pdev
->dev
))) {
604 pr_err("%s: timer not available or enabled.\n", __func__
);
608 omap_dm_timer_write_reg(timer
, OMAP_TIMER_COUNTER_REG
, value
);
610 /* Save the context */
611 timer
->context
.tcrr
= value
;
614 EXPORT_SYMBOL_GPL(omap_dm_timer_write_counter
);
616 int omap_dm_timers_active(void)
618 struct omap_dm_timer
*timer
;
620 list_for_each_entry(timer
, &omap_timer_list
, node
) {
621 if (!timer
->reserved
)
624 if (omap_dm_timer_read_reg(timer
, OMAP_TIMER_CTRL_REG
) &
625 OMAP_TIMER_CTRL_ST
) {
631 EXPORT_SYMBOL_GPL(omap_dm_timers_active
);
634 * omap_dm_timer_probe - probe function called for every registered device
635 * @pdev: pointer to current timer platform device
637 * Called by driver framework at the end of device registration for all
640 static int __devinit
omap_dm_timer_probe(struct platform_device
*pdev
)
644 struct omap_dm_timer
*timer
;
645 struct resource
*mem
, *irq
, *ioarea
;
646 struct dmtimer_platform_data
*pdata
= pdev
->dev
.platform_data
;
649 dev_err(&pdev
->dev
, "%s: no platform data.\n", __func__
);
653 irq
= platform_get_resource(pdev
, IORESOURCE_IRQ
, 0);
654 if (unlikely(!irq
)) {
655 dev_err(&pdev
->dev
, "%s: no IRQ resource.\n", __func__
);
659 mem
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
660 if (unlikely(!mem
)) {
661 dev_err(&pdev
->dev
, "%s: no memory resource.\n", __func__
);
665 ioarea
= request_mem_region(mem
->start
, resource_size(mem
),
668 dev_err(&pdev
->dev
, "%s: region already claimed.\n", __func__
);
672 timer
= kzalloc(sizeof(struct omap_dm_timer
), GFP_KERNEL
);
674 dev_err(&pdev
->dev
, "%s: no memory for omap_dm_timer.\n",
677 goto err_free_ioregion
;
680 timer
->io_base
= ioremap(mem
->start
, resource_size(mem
));
681 if (!timer
->io_base
) {
682 dev_err(&pdev
->dev
, "%s: ioremap failed.\n", __func__
);
687 timer
->id
= pdev
->id
;
688 timer
->irq
= irq
->start
;
689 timer
->reserved
= omap_dm_timer_reserved_systimer(timer
->id
);
691 timer
->capability
= pdata
->timer_capability
;
693 /* Skip pm_runtime_enable for OMAP1 */
694 if (!(timer
->capability
& OMAP_TIMER_NEEDS_RESET
)) {
695 pm_runtime_enable(&pdev
->dev
);
696 pm_runtime_irq_safe(&pdev
->dev
);
699 if (!timer
->reserved
) {
700 pm_runtime_get_sync(&pdev
->dev
);
701 __omap_dm_timer_init_regs(timer
);
702 pm_runtime_put(&pdev
->dev
);
705 /* add the timer element to the list */
706 spin_lock_irqsave(&dm_timer_lock
, flags
);
707 list_add_tail(&timer
->node
, &omap_timer_list
);
708 spin_unlock_irqrestore(&dm_timer_lock
, flags
);
710 dev_dbg(&pdev
->dev
, "Device Probed.\n");
718 release_mem_region(mem
->start
, resource_size(mem
));
724 * omap_dm_timer_remove - cleanup a registered timer device
725 * @pdev: pointer to current timer platform device
727 * Called by driver framework whenever a timer device is unregistered.
728 * In addition to freeing platform resources it also deletes the timer
729 * entry from the local list.
731 static int __devexit
omap_dm_timer_remove(struct platform_device
*pdev
)
733 struct omap_dm_timer
*timer
;
737 spin_lock_irqsave(&dm_timer_lock
, flags
);
738 list_for_each_entry(timer
, &omap_timer_list
, node
)
739 if (timer
->pdev
->id
== pdev
->id
) {
740 list_del(&timer
->node
);
745 spin_unlock_irqrestore(&dm_timer_lock
, flags
);
750 static struct platform_driver omap_dm_timer_driver
= {
751 .probe
= omap_dm_timer_probe
,
752 .remove
= __devexit_p(omap_dm_timer_remove
),
754 .name
= "omap_timer",
758 static int __init
omap_dm_timer_driver_init(void)
760 return platform_driver_register(&omap_dm_timer_driver
);
763 static void __exit
omap_dm_timer_driver_exit(void)
765 platform_driver_unregister(&omap_dm_timer_driver
);
/* Register with the early platform bus so system timers probe before init. */
early_platform_init("earlytimer", &omap_dm_timer_driver);
module_init(omap_dm_timer_driver_init);
module_exit(omap_dm_timer_driver_exit);

MODULE_DESCRIPTION("OMAP Dual-Mode Timer Driver");
MODULE_LICENSE("GPL");
/* NOTE(review): DRIVER_NAME is not defined in this file — presumably comes
 * from an included header; verify it matches "omap_timer". */
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Texas Instruments Inc");