/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue.  This means there will always be
 *    at least one free TRB in the ring.  This is useful if you want to turn
 *    that into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB.  If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules).  You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer.  If SW is producer, it rings the doorbell for command
 *    and endpoint rings.  If HC is the producer for the event ring,
 *    it generates an interrupt according to interrupt modulation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer.  SW is the consumer for the event ring, and it
 *    updates the event ring dequeue pointer.  HC is the consumer for the
 *    command and endpoint rings; it generates events on the event ring for
 *    these.
 */
70 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
73 dma_addr_t
trb_virt_to_dma(struct xhci_segment
*seg
,
78 if (!seg
|| !trb
|| (void *) trb
< (void *) seg
->trbs
)
80 /* offset in bytes, since these are byte-addressable */
81 offset
= (unsigned int) trb
- (unsigned int) seg
->trbs
;
82 /* SEGMENT_SIZE in bytes, trbs are 16-byte aligned */
83 if (offset
> SEGMENT_SIZE
|| (offset
% sizeof(*trb
)) != 0)
85 return seg
->dma
+ offset
;
88 /* Does this link TRB point to the first segment in a ring,
89 * or was the previous TRB the last TRB on the last segment in the ERST?
91 static inline bool last_trb_on_last_seg(struct xhci_hcd
*xhci
, struct xhci_ring
*ring
,
92 struct xhci_segment
*seg
, union xhci_trb
*trb
)
94 if (ring
== xhci
->event_ring
)
95 return (trb
== &seg
->trbs
[TRBS_PER_SEGMENT
]) &&
96 (seg
->next
== xhci
->event_ring
->first_seg
);
98 return trb
->link
.control
& LINK_TOGGLE
;
101 /* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
102 * segment? I.e. would the updated event TRB pointer step off the end of the
105 static inline int last_trb(struct xhci_hcd
*xhci
, struct xhci_ring
*ring
,
106 struct xhci_segment
*seg
, union xhci_trb
*trb
)
108 if (ring
== xhci
->event_ring
)
109 return trb
== &seg
->trbs
[TRBS_PER_SEGMENT
];
111 return (trb
->link
.control
& TRB_TYPE_BITMASK
) == TRB_TYPE(TRB_LINK
);
115 * See Cycle bit rules. SW is the consumer for the event ring only.
116 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
118 static void inc_deq(struct xhci_hcd
*xhci
, struct xhci_ring
*ring
, bool consumer
)
120 union xhci_trb
*next
= ++(ring
->dequeue
);
123 /* Update the dequeue pointer further if that was a link TRB or we're at
124 * the end of an event ring segment (which doesn't have link TRBS)
126 while (last_trb(xhci
, ring
, ring
->deq_seg
, next
)) {
127 if (consumer
&& last_trb_on_last_seg(xhci
, ring
, ring
->deq_seg
, next
)) {
128 ring
->cycle_state
= (ring
->cycle_state
? 0 : 1);
130 xhci_dbg(xhci
, "Toggle cycle state for ring 0x%x = %i\n",
132 (unsigned int) ring
->cycle_state
);
134 ring
->deq_seg
= ring
->deq_seg
->next
;
135 ring
->dequeue
= ring
->deq_seg
->trbs
;
136 next
= ring
->dequeue
;
141 * See Cycle bit rules. SW is the consumer for the event ring only.
142 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
144 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
145 * chain bit is set), then set the chain bit in all the following link TRBs.
146 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
147 * have their chain bit cleared (so that each Link TRB is a separate TD).
149 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
150 * set, but other sections talk about dealing with the chain bit set.
151 * Assume section 6.4.4.1 is wrong, and the chain bit can be set in a Link TRB.
153 static void inc_enq(struct xhci_hcd
*xhci
, struct xhci_ring
*ring
, bool consumer
)
156 union xhci_trb
*next
;
158 chain
= ring
->enqueue
->generic
.field
[3] & TRB_CHAIN
;
159 next
= ++(ring
->enqueue
);
162 /* Update the dequeue pointer further if that was a link TRB or we're at
163 * the end of an event ring segment (which doesn't have link TRBS)
165 while (last_trb(xhci
, ring
, ring
->enq_seg
, next
)) {
167 if (ring
!= xhci
->event_ring
) {
168 /* Give this link TRB to the hardware */
169 if (next
->link
.control
& TRB_CYCLE
)
170 next
->link
.control
&= (u32
) ~TRB_CYCLE
;
172 next
->link
.control
|= (u32
) TRB_CYCLE
;
173 next
->link
.control
&= TRB_CHAIN
;
174 next
->link
.control
|= chain
;
176 /* Toggle the cycle bit after the last ring segment. */
177 if (last_trb_on_last_seg(xhci
, ring
, ring
->enq_seg
, next
)) {
178 ring
->cycle_state
= (ring
->cycle_state
? 0 : 1);
180 xhci_dbg(xhci
, "Toggle cycle state for ring 0x%x = %i\n",
182 (unsigned int) ring
->cycle_state
);
185 ring
->enq_seg
= ring
->enq_seg
->next
;
186 ring
->enqueue
= ring
->enq_seg
->trbs
;
187 next
= ring
->enqueue
;
192 * Check to see if there's room to enqueue num_trbs on the ring. See rules
194 * FIXME: this would be simpler and faster if we just kept track of the number
195 * of free TRBs in a ring.
197 static int room_on_ring(struct xhci_hcd
*xhci
, struct xhci_ring
*ring
,
198 unsigned int num_trbs
)
201 union xhci_trb
*enq
= ring
->enqueue
;
202 struct xhci_segment
*enq_seg
= ring
->enq_seg
;
204 /* Check if ring is empty */
205 if (enq
== ring
->dequeue
)
207 /* Make sure there's an extra empty TRB available */
208 for (i
= 0; i
<= num_trbs
; ++i
) {
209 if (enq
== ring
->dequeue
)
212 while (last_trb(xhci
, ring
, enq_seg
, enq
)) {
213 enq_seg
= enq_seg
->next
;
220 void set_hc_event_deq(struct xhci_hcd
*xhci
)
225 deq
= trb_virt_to_dma(xhci
->event_ring
->deq_seg
,
226 xhci
->event_ring
->dequeue
);
227 if (deq
== 0 && !in_interrupt())
228 xhci_warn(xhci
, "WARN something wrong with SW event ring "
230 /* Update HC event ring dequeue pointer */
231 temp
= xhci_readl(xhci
, &xhci
->ir_set
->erst_dequeue
[0]);
232 temp
&= ERST_PTR_MASK
;
234 xhci_dbg(xhci
, "// Write event ring dequeue pointer\n");
235 xhci_writel(xhci
, 0, &xhci
->ir_set
->erst_dequeue
[1]);
236 xhci_writel(xhci
, (deq
& ~ERST_PTR_MASK
) | temp
,
237 &xhci
->ir_set
->erst_dequeue
[0]);
240 /* Ring the host controller doorbell after placing a command on the ring */
241 void ring_cmd_db(struct xhci_hcd
*xhci
)
245 xhci_dbg(xhci
, "// Ding dong!\n");
246 temp
= xhci_readl(xhci
, &xhci
->dba
->doorbell
[0]) & DB_MASK
;
247 xhci_writel(xhci
, temp
| DB_TARGET_HOST
, &xhci
->dba
->doorbell
[0]);
248 /* Flush PCI posted writes */
249 xhci_readl(xhci
, &xhci
->dba
->doorbell
[0]);
252 static void handle_cmd_completion(struct xhci_hcd
*xhci
,
253 struct xhci_event_cmd
*event
)
255 int slot_id
= TRB_TO_SLOT_ID(event
->flags
);
257 dma_addr_t cmd_dequeue_dma
;
259 cmd_dma
= (((u64
) event
->cmd_trb
[1]) << 32) + event
->cmd_trb
[0];
260 cmd_dequeue_dma
= trb_virt_to_dma(xhci
->cmd_ring
->deq_seg
,
261 xhci
->cmd_ring
->dequeue
);
262 /* Is the command ring deq ptr out of sync with the deq seg ptr? */
263 if (cmd_dequeue_dma
== 0) {
264 xhci
->error_bitmask
|= 1 << 4;
267 /* Does the DMA address match our internal dequeue pointer address? */
268 if (cmd_dma
!= (u64
) cmd_dequeue_dma
) {
269 xhci
->error_bitmask
|= 1 << 5;
272 switch (xhci
->cmd_ring
->dequeue
->generic
.field
[3] & TRB_TYPE_BITMASK
) {
273 case TRB_TYPE(TRB_ENABLE_SLOT
):
274 if (GET_COMP_CODE(event
->status
) == COMP_SUCCESS
)
275 xhci
->slot_id
= slot_id
;
278 complete(&xhci
->addr_dev
);
280 case TRB_TYPE(TRB_DISABLE_SLOT
):
281 if (xhci
->devs
[slot_id
])
282 xhci_free_virt_device(xhci
, slot_id
);
284 case TRB_TYPE(TRB_ADDR_DEV
):
285 xhci
->devs
[slot_id
]->cmd_status
= GET_COMP_CODE(event
->status
);
286 complete(&xhci
->addr_dev
);
288 case TRB_TYPE(TRB_CMD_NOOP
):
289 ++xhci
->noops_handled
;
292 /* Skip over unknown commands on the event ring */
293 xhci
->error_bitmask
|= 1 << 6;
296 inc_deq(xhci
, xhci
->cmd_ring
, false);
299 static void handle_port_status(struct xhci_hcd
*xhci
,
300 union xhci_trb
*event
)
304 /* Port status change events always have a successful completion code */
305 if (GET_COMP_CODE(event
->generic
.field
[2]) != COMP_SUCCESS
) {
306 xhci_warn(xhci
, "WARN: xHC returned failed port status event\n");
307 xhci
->error_bitmask
|= 1 << 8;
309 /* FIXME: core doesn't care about all port link state changes yet */
310 port_id
= GET_PORT_ID(event
->generic
.field
[0]);
311 xhci_dbg(xhci
, "Port Status Change Event for port %d\n", port_id
);
313 /* Update event ring dequeue pointer before dropping the lock */
314 inc_deq(xhci
, xhci
->event_ring
, true);
315 set_hc_event_deq(xhci
);
317 spin_unlock(&xhci
->lock
);
318 /* Pass this up to the core */
319 usb_hcd_poll_rh_status(xhci_to_hcd(xhci
));
320 spin_lock(&xhci
->lock
);
324 * This function handles all OS-owned events on the event ring. It may drop
325 * xhci->lock between event processing (e.g. to pass up port status changes).
327 void handle_event(struct xhci_hcd
*xhci
)
329 union xhci_trb
*event
;
332 if (!xhci
->event_ring
|| !xhci
->event_ring
->dequeue
) {
333 xhci
->error_bitmask
|= 1 << 1;
337 event
= xhci
->event_ring
->dequeue
;
338 /* Does the HC or OS own the TRB? */
339 if ((event
->event_cmd
.flags
& TRB_CYCLE
) !=
340 xhci
->event_ring
->cycle_state
) {
341 xhci
->error_bitmask
|= 1 << 2;
345 /* FIXME: Handle more event types. */
346 switch ((event
->event_cmd
.flags
& TRB_TYPE_BITMASK
)) {
347 case TRB_TYPE(TRB_COMPLETION
):
348 handle_cmd_completion(xhci
, &event
->event_cmd
);
350 case TRB_TYPE(TRB_PORT_STATUS
):
351 handle_port_status(xhci
, event
);
355 xhci
->error_bitmask
|= 1 << 3;
359 /* Update SW and HC event ring dequeue pointer */
360 inc_deq(xhci
, xhci
->event_ring
, true);
361 set_hc_event_deq(xhci
);
363 /* Are there more items on the event ring? */
368 * Generic function for queueing a TRB on a ring.
369 * The caller must have checked to make sure there's room on the ring.
371 static void queue_trb(struct xhci_hcd
*xhci
, struct xhci_ring
*ring
,
373 u32 field1
, u32 field2
, u32 field3
, u32 field4
)
375 struct xhci_generic_trb
*trb
;
377 trb
= &ring
->enqueue
->generic
;
378 trb
->field
[0] = field1
;
379 trb
->field
[1] = field2
;
380 trb
->field
[2] = field3
;
381 trb
->field
[3] = field4
;
382 inc_enq(xhci
, ring
, consumer
);
385 /* Generic function for queueing a command TRB on the command ring */
386 static int queue_command(struct xhci_hcd
*xhci
, u32 field1
, u32 field2
, u32 field3
, u32 field4
)
388 if (!room_on_ring(xhci
, xhci
->cmd_ring
, 1)) {
390 xhci_err(xhci
, "ERR: No room for command on command ring\n");
393 queue_trb(xhci
, xhci
->cmd_ring
, false, field1
, field2
, field3
,
394 field4
| xhci
->cmd_ring
->cycle_state
);
398 /* Queue a no-op command on the command ring */
399 static int queue_cmd_noop(struct xhci_hcd
*xhci
)
401 return queue_command(xhci
, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP
));
405 * Place a no-op command on the command ring to test the command and
408 void *setup_one_noop(struct xhci_hcd
*xhci
)
410 if (queue_cmd_noop(xhci
) < 0)
412 xhci
->noops_submitted
++;
416 /* Queue a slot enable or disable request on the command ring */
417 int queue_slot_control(struct xhci_hcd
*xhci
, u32 trb_type
, u32 slot_id
)
419 return queue_command(xhci
, 0, 0, 0,
420 TRB_TYPE(trb_type
) | SLOT_ID_FOR_TRB(slot_id
));
423 /* Queue an address device command TRB */
424 int queue_address_device(struct xhci_hcd
*xhci
, dma_addr_t in_ctx_ptr
, u32 slot_id
)
426 return queue_command(xhci
, in_ctx_ptr
, 0, 0,
427 TRB_TYPE(TRB_ADDR_DEV
) | SLOT_ID_FOR_TRB(slot_id
));