/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/hashtable.h>
#include <linux/spinlock_types.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"
/*
 * Maximum lifetime of a call (in jiffies).
 */
unsigned int rxrpc_max_call_lifetime = 60 * HZ;

/*
 * Time till dead call expires after last use (in jiffies).
 */
unsigned int rxrpc_dead_call_expiry = 2 * HZ;
const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_CLIENT_FINAL_ACK]		= "ClFnlACK",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
	[RXRPC_CALL_SERVER_BUSY]		= "SvBusy  ",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
	[RXRPC_CALL_DEAD]			= "Dead    ",
};
struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);

static void rxrpc_destroy_call(struct work_struct *work);
static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_dead_call_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);

static DEFINE_SPINLOCK(rxrpc_call_hash_lock);
static DEFINE_HASHTABLE(rxrpc_call_hash, 10);
/*
 * Hash function for rxrpc_call_hash
 */
static unsigned long rxrpc_call_hashfunc(
	u8		in_clientflag,
	u32		cid,
	u32		call_id,
	u32		epoch,
	u16		service_id,
	sa_family_t	proto,
	void		*localptr,
	unsigned int	addr_size,
	const u8	*peer_addr)
{
	const u16 *p;
	unsigned int i;
	unsigned long key;

	_enter("");

	key = (unsigned long)localptr;
	/* We just want to add up the __be32 values, so forcing the
	 * cast should be okay.
	 */
	key += epoch;
	key += service_id;
	key += call_id;
	key += (cid & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT;
	key += cid & RXRPC_CHANNELMASK;
	key += in_clientflag;
	key += proto;
	/* Step through the peer address in 16-bit portions for speed */
	for (i = 0, p = (const u16 *)peer_addr; i < addr_size >> 1; i++, p++)
		key += *p;
	_leave(" key = 0x%lx", key);
	return key;
}
/*
 * Add a call to the hashtable
 */
static void rxrpc_call_hash_add(struct rxrpc_call *call)
{
	unsigned long key;
	unsigned int addr_size = 0;

	_enter("");
	switch (call->proto) {
	case AF_INET:
		addr_size = sizeof(call->peer_ip.ipv4_addr);
		break;
	case AF_INET6:
		addr_size = sizeof(call->peer_ip.ipv6_addr);
		break;
	default:
		break;
	}
	key = rxrpc_call_hashfunc(call->in_clientflag, call->cid,
				  call->call_id, call->epoch,
				  call->service_id, call->proto,
				  call->conn->trans->local, addr_size,
				  call->peer_ip.ipv6_addr);
	/* Store the full key in the call */
	call->hash_key = key;
	spin_lock(&rxrpc_call_hash_lock);
	hash_add_rcu(rxrpc_call_hash, &call->hash_node, key);
	spin_unlock(&rxrpc_call_hash_lock);
	_leave("");
}
/*
 * Remove a call from the hashtable
 */
static void rxrpc_call_hash_del(struct rxrpc_call *call)
{
	_enter("");
	spin_lock(&rxrpc_call_hash_lock);
	hash_del_rcu(&call->hash_node);
	spin_unlock(&rxrpc_call_hash_lock);
	_leave("");
}
/*
 * Find a call in the hashtable and return it, or NULL if it
 * isn't there.
 */
struct rxrpc_call *rxrpc_find_call_hash(
	struct rxrpc_host_header *hdr,
	void *localptr,
	sa_family_t proto,
	const void *peer_addr)
{
	unsigned long key;
	unsigned int addr_size = 0;
	struct rxrpc_call *call = NULL;
	struct rxrpc_call *ret = NULL;
	u8 in_clientflag = hdr->flags & RXRPC_CLIENT_INITIATED;

	_enter("");
	switch (proto) {
	case AF_INET:
		addr_size = sizeof(call->peer_ip.ipv4_addr);
		break;
	case AF_INET6:
		addr_size = sizeof(call->peer_ip.ipv6_addr);
		break;
	default:
		break;
	}

	key = rxrpc_call_hashfunc(in_clientflag, hdr->cid, hdr->callNumber,
				  hdr->epoch, hdr->serviceId,
				  proto, localptr, addr_size,
				  peer_addr);
	hash_for_each_possible_rcu(rxrpc_call_hash, call, hash_node, key) {
		if (call->hash_key == key &&
		    call->call_id == hdr->callNumber &&
		    call->cid == hdr->cid &&
		    call->in_clientflag == in_clientflag &&
		    call->service_id == hdr->serviceId &&
		    call->proto == proto &&
		    call->local == localptr &&
		    memcmp(call->peer_ip.ipv6_addr, peer_addr,
			   addr_size) == 0 &&
		    call->epoch == hdr->epoch) {
			ret = call;
			break;
		}
	}
	_leave(" = %p", ret);
	return ret;
}
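
/*
 * Minimal usage sketch (an assumption, not taken from the original file):
 * a lookup driven from the packet-input path would be expected to run in
 * an RCU read-side section, since only hash_for_each_possible_rcu()
 * protects the bucket walk, e.g.:
 *
 *	rcu_read_lock();
 *	call = rxrpc_find_call_hash(&hdr, local, AF_INET, &peer_ipv4_addr);
 *	if (call)
 *		rxrpc_get_call(call);
 *	rcu_read_unlock();
 *
 * Writers serialise on rxrpc_call_hash_lock (see rxrpc_call_hash_add() and
 * rxrpc_call_hash_del() above); readers take no lock at all.
 */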
/*
 * allocate a new call
 */
static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->acks_winsz = 16;
	call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
				    gfp);
	if (!call->acks_window) {
		kmem_cache_free(rxrpc_call_jar, call);
		return NULL;
	}

	setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
		    (unsigned long) call);
	setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
		    (unsigned long) call);
	setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
		    (unsigned long) call);
	setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
		    (unsigned long) call);
	INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->accept_link);
	skb_queue_head_init(&call->rx_queue);
	skb_queue_head_init(&call->rx_oos_queue);
	init_waitqueue_head(&call->tx_waitq);
	spin_lock_init(&call->lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);
	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;

	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_data_expect = 1;
	call->rx_data_eaten = 0;
	call->rx_first_oos = 0;
	call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
	call->creation_jif = jiffies;
	return call;
}
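
/*
 * Note on the timer setup above (explanatory, assumptions mine): this uses
 * the old setup_timer() API, which passes an opaque unsigned long to the
 * handler, so each expiry function below recovers the call by casting that
 * value back, e.g.:
 *
 *	static void rxrpc_call_life_expired(unsigned long _call)
 *	{
 *		struct rxrpc_call *call = (struct rxrpc_call *)_call;
 *		...
 *	}
 *
 * The timers are initialised here but only armed later with add_timer()
 * once an expiry time has been written to ->expires.
 */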
/*
 * allocate a new client call and attempt to get a connection slot for it
 */
static struct rxrpc_call *rxrpc_alloc_client_call(
	struct rxrpc_sock *rx,
	struct rxrpc_transport *trans,
	struct rxrpc_conn_bundle *bundle,
	gfp_t gfp)
{
	struct rxrpc_call *call;
	int ret;

	_enter("");

	ASSERT(trans != NULL);
	ASSERT(bundle != NULL);

	call = rxrpc_alloc_call(gfp);
	if (!call)
		return ERR_PTR(-ENOMEM);

	call->socket = rx;
	call->rx_data_post = 1;

	ret = rxrpc_connect_call(rx, trans, bundle, call, gfp);
	if (ret < 0) {
		kmem_cache_free(rxrpc_call_jar, call);
		return ERR_PTR(ret);
	}

	/* Record copies of information for hashtable lookup */
	call->proto = rx->proto;
	call->local = trans->local;
	switch (call->proto) {
	case AF_INET:
		call->peer_ip.ipv4_addr =
			trans->peer->srx.transport.sin.sin_addr.s_addr;
		break;
	case AF_INET6:
		memcpy(call->peer_ip.ipv6_addr,
		       trans->peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8,
		       sizeof(call->peer_ip.ipv6_addr));
		break;
	}
	call->epoch = call->conn->epoch;
	call->service_id = call->conn->service_id;
	call->in_clientflag = call->conn->in_clientflag;
	/* Add the new call to the hashtable */
	rxrpc_call_hash_add(call);

	spin_lock(&call->conn->trans->peer->lock);
	list_add(&call->error_link, &call->conn->trans->peer->error_targets);
	spin_unlock(&call->conn->trans->peer->lock);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);

	_leave(" = %p", call);
	return call;
}
/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_transport *trans,
					 struct rxrpc_conn_bundle *bundle,
					 unsigned long user_call_ID,
					 int create,
					 gfp_t gfp)
{
	struct rxrpc_call *call, *candidate;
	struct rb_node *p, *parent, **pp;

	_enter("%p,%d,%d,%lx,%d",
	       rx, trans ? trans->debug_id : -1, bundle ? bundle->debug_id : -1,
	       user_call_ID, create);

	/* search the extant calls first for one that matches the specified
	 * user ID */
	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);

	if (!create || !trans)
		return ERR_PTR(-EBADSLT);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_client_call(rx, trans, bundle, gfp);
	if (IS_ERR(candidate)) {
		_leave(" = %ld", PTR_ERR(candidate));
		return candidate;
	}

	candidate->user_call_ID = user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &candidate->flags);

	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_second;
	}

	/* second search also failed; add the new call */
	call = candidate;
	candidate = NULL;
	rxrpc_get_call(call);

	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

	/* we found the call in the list immediately */
found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [extant %d]", call, atomic_read(&call->usage));
	return call;

	/* we found the call on the second time through the list */
found_extant_second:
	rxrpc_get_call(call);
	write_unlock(&rx->call_lock);
	rxrpc_put_call(candidate);
	_leave(" = %p [second %d]", call, atomic_read(&call->usage));
	return call;
}
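
/*
 * Sketch of the locking pattern used above (added for orientation): the
 * user_call_ID tree is probed twice because allocation may sleep and so
 * cannot happen under the lock.  Roughly:
 *
 *	read_lock(&rx->call_lock);      search tree;    read_unlock();
 *	candidate = rxrpc_alloc_client_call(rx, trans, bundle, gfp);
 *	write_lock(&rx->call_lock);     search tree again;
 *	    hit  -> found_extant_second: drop the candidate, reuse the call
 *	    miss -> rb_link_node() + rb_insert_color() the candidate
 *	write_unlock(&rx->call_lock);
 *
 * The second search closes the race where another thread inserts the same
 * user_call_ID between dropping the read lock and taking the write lock.
 */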
/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
				       struct rxrpc_connection *conn,
				       struct rxrpc_host_header *hdr)
{
	struct rxrpc_call *call, *candidate;
	struct rb_node **p, *parent;
	u32 call_id;

	_enter(",%d", conn->debug_id);

	candidate = rxrpc_alloc_call(GFP_NOIO);
	if (!candidate)
		return ERR_PTR(-EBUSY);

	candidate->socket = rx;
	candidate->conn = conn;
	candidate->cid = hdr->cid;
	candidate->call_id = hdr->callNumber;
	candidate->channel = hdr->cid & RXRPC_CHANNELMASK;
	candidate->rx_data_post = 0;
	candidate->state = RXRPC_CALL_SERVER_ACCEPTING;
	if (conn->security_ix > 0)
		candidate->state = RXRPC_CALL_SERVER_SECURING;

	write_lock_bh(&conn->lock);

	/* set the channel for this call */
	call = conn->channels[candidate->channel];
	_debug("channel[%u] is %p", candidate->channel, call);
	if (call && call->call_id == hdr->callNumber) {
		/* already set; must've been a duplicate packet */
		_debug("extant call [%d]", call->state);
		ASSERTCMP(call->conn, ==, conn);

		read_lock(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_LOCALLY_ABORTED:
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				rxrpc_queue_call(call);
		case RXRPC_CALL_REMOTELY_ABORTED:
			read_unlock(&call->state_lock);
			goto aborted_call;
		default:
			rxrpc_get_call(call);
			read_unlock(&call->state_lock);
			goto extant_call;
		}
	}

	if (call) {
		/* it seems the channel is still in use from the previous call
		 * - ditch the old binding if its call is now complete */
		_debug("CALL: %u { %s }",
		       call->debug_id, rxrpc_call_states[call->state]);

		if (call->state >= RXRPC_CALL_COMPLETE) {
			conn->channels[call->channel] = NULL;
		} else {
			write_unlock_bh(&conn->lock);
			kmem_cache_free(rxrpc_call_jar, candidate);
			_leave(" = -EBUSY");
			return ERR_PTR(-EBUSY);
		}
	}

	/* check the call number isn't duplicate */
	call_id = hdr->callNumber;
	p = &conn->calls.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		call = rb_entry(parent, struct rxrpc_call, conn_node);

		/* The tree is sorted in order of the __be32 value without
		 * turning it into host order.
		 */
		if (call_id < call->call_id)
			p = &(*p)->rb_left;
		else if (call_id > call->call_id)
			p = &(*p)->rb_right;
		else
			goto old_call;
	}

	/* make the call available */
	call = candidate;
	candidate = NULL;
	rb_link_node(&call->conn_node, parent, p);
	rb_insert_color(&call->conn_node, &conn->calls);
	conn->channels[call->channel] = call;
	sock_hold(&rx->sk);
	atomic_inc(&conn->usage);
	write_unlock_bh(&conn->lock);

	spin_lock(&conn->trans->peer->lock);
	list_add(&call->error_link, &conn->trans->peer->error_targets);
	spin_unlock(&conn->trans->peer->lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	/* Record copies of information for hashtable lookup */
	call->proto = rx->proto;
	call->local = conn->trans->local;
	switch (call->proto) {
	case AF_INET:
		call->peer_ip.ipv4_addr =
			conn->trans->peer->srx.transport.sin.sin_addr.s_addr;
		break;
	case AF_INET6:
		memcpy(call->peer_ip.ipv6_addr,
		       conn->trans->peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8,
		       sizeof(call->peer_ip.ipv6_addr));
		break;
	default:
		break;
	}
	call->epoch = conn->epoch;
	call->service_id = conn->service_id;
	call->in_clientflag = conn->in_clientflag;
	/* Add the new call to the hashtable */
	rxrpc_call_hash_add(call);

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	_leave(" = %p {%d} [new]", call, call->debug_id);
	return call;

extant_call:
	write_unlock_bh(&conn->lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
	return call;

aborted_call:
	write_unlock_bh(&conn->lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNABORTED");
	return ERR_PTR(-ECONNABORTED);

old_call:
	write_unlock_bh(&conn->lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNRESET [old]");
	return ERR_PTR(-ECONNRESET);
}
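
/*
 * Illustrative note (mine): a connection has RXRPC_MAXCALLS (four) channel
 * slots, and an incoming call is placed by the low bits of its CID:
 *
 *	candidate->channel = hdr->cid & RXRPC_CHANNELMASK;
 *	call = conn->channels[candidate->channel];
 *
 * A new call may only displace the slot's previous occupant once that call
 * has reached RXRPC_CALL_COMPLETE; otherwise the packet is rejected with
 * -EBUSY.  Duplicate call IDs on the same connection are caught separately
 * by the conn->calls rb-tree walk (old_call / -ECONNRESET).
 */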
/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *rx,
					  unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	/* search the extant calls for one that matches the specified user
	 * ID */
	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

	/* we found the call in the list immediately */
found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}
/*
 * detach a call from a socket and set up for release
 */
void rxrpc_release_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_sock *rx = call->socket;

	_enter("{%d,%d,%d,%d}",
	       call->debug_id, atomic_read(&call->usage),
	       atomic_read(&call->ackr_not_idle),
	       call->rx_first_oos);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	/* dissociate from the socket
	 * - the socket's ref on the call is passed to the death timer
	 */
	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	write_lock_bh(&rx->call_lock);
	if (!list_empty(&call->accept_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		list_del_init(&call->accept_link);
		sk_acceptq_removed(&rx->sk);
	} else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}
	write_unlock_bh(&rx->call_lock);

	/* free up the channel for reuse */
	spin_lock(&conn->trans->client_lock);
	write_lock_bh(&conn->lock);
	write_lock(&call->state_lock);

	if (conn->channels[call->channel] == call)
		conn->channels[call->channel] = NULL;

	if (conn->out_clientflag && conn->bundle) {
		conn->avail_calls++;
		switch (conn->avail_calls) {
		case 1:
			list_move_tail(&conn->bundle_link,
				       &conn->bundle->avail_conns);
		case 2 ... RXRPC_MAXCALLS - 1:
			ASSERT(conn->channels[0] == NULL ||
			       conn->channels[1] == NULL ||
			       conn->channels[2] == NULL ||
			       conn->channels[3] == NULL);
			break;
		case RXRPC_MAXCALLS:
			list_move_tail(&conn->bundle_link,
				       &conn->bundle->unused_conns);
			ASSERT(conn->channels[0] == NULL &&
			       conn->channels[1] == NULL &&
			       conn->channels[2] == NULL &&
			       conn->channels[3] == NULL);
			break;
		default:
			pr_err("conn->avail_calls=%d\n", conn->avail_calls);
			BUG();
		}
	}

	spin_unlock(&conn->trans->client_lock);

	if (call->state < RXRPC_CALL_COMPLETE &&
	    call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
		_debug("+++ ABORTING STATE %d +++\n", call->state);
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->local_abort = RX_CALL_DEAD;
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		rxrpc_queue_call(call);
	}
	write_unlock(&call->state_lock);
	write_unlock_bh(&conn->lock);

	/* clean up the Rx queue */
	if (!skb_queue_empty(&call->rx_queue) ||
	    !skb_queue_empty(&call->rx_oos_queue)) {
		struct rxrpc_skb_priv *sp;
		struct sk_buff *skb;

		_debug("purge Rx queues");

		spin_lock_bh(&call->lock);
		while ((skb = skb_dequeue(&call->rx_queue)) ||
		       (skb = skb_dequeue(&call->rx_oos_queue))) {
			sp = rxrpc_skb(skb);
			if (sp->call) {
				ASSERTCMP(sp->call, ==, call);
				rxrpc_put_call(call);
				sp->call = NULL;
			}
			skb->destructor = NULL;
			spin_unlock_bh(&call->lock);

			_debug("- zap %s %%%u #%u",
			       rxrpc_pkts[sp->hdr.type],
			       sp->hdr.serial, sp->hdr.seq);
			rxrpc_free_skb(skb);
			spin_lock_bh(&call->lock);
		}
		spin_unlock_bh(&call->lock);

		ASSERTCMP(call->state, !=, RXRPC_CALL_COMPLETE);
	}

	del_timer_sync(&call->resend_timer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->lifetimer);
	call->deadspan.expires = jiffies + rxrpc_dead_call_expiry;
	add_timer(&call->deadspan);

	_leave("");
}
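
/*
 * Rough call-teardown sequence (added for orientation; see the functions
 * below):
 *
 *	rxrpc_release_call()        - detach from socket, arm ->deadspan
 *	rxrpc_dead_call_expired()   - timer: state = RXRPC_CALL_DEAD, put ref
 *	__rxrpc_put_call()          - last ref gone: queue ->destroyer
 *	rxrpc_destroy_call()        - unlink from the global rxrpc_calls list
 *	rxrpc_cleanup_call()        - free skbs, kill timers, free the call
 *
 * As noted above, the socket's reference on the call is handed to the
 * deadspan timer, so the call outlives the socket detach by at least
 * rxrpc_dead_call_expiry jiffies.
 */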
/*
 * handle a dead call being ready for reaping
 */
static void rxrpc_dead_call_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_DEAD;
	write_unlock_bh(&call->state_lock);
	rxrpc_put_call(call);
}
/*
 * mark a call as to be released, aborting it if it's still in progress
 * - called with softirqs disabled
 */
static void rxrpc_mark_call_released(struct rxrpc_call *call)
{
	bool sched;

	write_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_DEAD) {
		sched = false;
		if (call->state < RXRPC_CALL_COMPLETE) {
			_debug("abort call %p", call);
			call->state = RXRPC_CALL_LOCALLY_ABORTED;
			call->local_abort = RX_CALL_DEAD;
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				sched = true;
		}
		if (!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
			sched = true;
		if (sched)
			rxrpc_queue_call(call);
	}
	write_unlock(&call->state_lock);
}
/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p", rx);

	read_lock_bh(&rx->call_lock);

	/* mark all the calls as no longer wanting incoming packets */
	for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
		call = rb_entry(p, struct rxrpc_call, sock_node);
		rxrpc_mark_call_released(call);
	}

	/* kill the not-yet-accepted incoming calls */
	list_for_each_entry(call, &rx->secureq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	list_for_each_entry(call, &rx->acceptq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	read_unlock_bh(&rx->call_lock);
	_leave("");
}
/*
 * release a call
 */
void __rxrpc_put_call(struct rxrpc_call *call)
{
	ASSERT(call != NULL);

	_enter("%p{u=%d}", call, atomic_read(&call->usage));

	ASSERTCMP(atomic_read(&call->usage), >, 0);

	if (atomic_dec_and_test(&call->usage)) {
		_debug("call %d dead", call->debug_id);
		ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
		rxrpc_queue_work(&call->destroyer);
	}
	_leave("");
}
/*
 * clean up a call
 */
static void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	_net("DESTROY CALL %d", call->debug_id);

	ASSERT(call->socket);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->lifetimer);
	del_timer_sync(&call->deadspan);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->resend_timer);

	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->events, ==, 0);
	if (work_pending(&call->processor)) {
		_debug("defer destroy");
		rxrpc_queue_work(&call->destroyer);
		return;
	}

	if (call->conn) {
		spin_lock(&call->conn->trans->peer->lock);
		list_del(&call->error_link);
		spin_unlock(&call->conn->trans->peer->lock);

		write_lock_bh(&call->conn->lock);
		rb_erase(&call->conn_node, &call->conn->calls);
		write_unlock_bh(&call->conn->lock);
		rxrpc_put_connection(call->conn);
	}

	/* Remove the call from the hash */
	rxrpc_call_hash_del(call);

	if (call->acks_window) {
		_debug("kill Tx window %d",
		       CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz));
		smp_mb();
		while (CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz) > 0) {
			struct rxrpc_skb_priv *sp;
			unsigned long _skb;

			_skb = call->acks_window[call->acks_tail] & ~1;
			sp = rxrpc_skb((struct sk_buff *)_skb);
			_debug("+++ clear Tx %u", sp->hdr.seq);
			rxrpc_free_skb((struct sk_buff *)_skb);
			call->acks_tail =
				(call->acks_tail + 1) & (call->acks_winsz - 1);
		}

		kfree(call->acks_window);
	}

	rxrpc_free_skb(call->tx_pending);

	rxrpc_purge_queue(&call->rx_queue);
	ASSERT(skb_queue_empty(&call->rx_oos_queue));
	sock_put(&call->socket->sk);
	kmem_cache_free(rxrpc_call_jar, call);
}
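
/*
 * Illustrative note on the Tx window drain above: acks_winsz is a power of
 * two (16 at allocation time), so the ring indices wrap with a mask rather
 * than a modulo:
 *
 *	tail = (tail + 1) & (acks_winsz - 1);	becomes 0 after 15, not 16
 *
 * CIRC_CNT(head, tail, size) from <linux/circ_buf.h> then gives the number
 * of occupied slots still waiting to be freed.
 */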
/*
 * destroy a call
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, destroyer);

	_enter("%p{%d,%d,%p}",
	       call, atomic_read(&call->usage), call->channel, call->conn);

	ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	rxrpc_cleanup_call(call);
	_leave("");
}
/*
 * preemptively destroy all the call records from a transport endpoint rather
 * than waiting for them to time out
 */
void __exit rxrpc_destroy_all_calls(void)
{
	struct rxrpc_call *call;

	_enter("");
	write_lock_bh(&rxrpc_call_lock);

	while (!list_empty(&rxrpc_calls)) {
		call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		list_del_init(&call->link);

		switch (atomic_read(&call->usage)) {
		case 0:
			ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
			break;
		case 1:
			if (del_timer_sync(&call->deadspan) != 0 &&
			    call->state != RXRPC_CALL_DEAD)
				rxrpc_dead_call_expired((unsigned long) call);
			if (call->state != RXRPC_CALL_DEAD)
				break;
		default:
			pr_err("Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
			       call, atomic_read(&call->usage),
			       atomic_read(&call->ackr_not_idle),
			       rxrpc_call_states[call->state],
			       call->flags, call->events);
			if (!skb_queue_empty(&call->rx_queue))
				pr_err("Rx queue occupied\n");
			if (!skb_queue_empty(&call->rx_oos_queue))
				pr_err("OOS queue occupied\n");
			break;
		}

		write_unlock_bh(&rxrpc_call_lock);
		cond_resched();
		write_lock_bh(&rxrpc_call_lock);
	}

	write_unlock_bh(&rxrpc_call_lock);
	_leave("");
}
/*
 * handle call lifetime being exceeded
 */
static void rxrpc_call_life_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	_enter("{%d}", call->debug_id);
	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE) {
		set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
		rxrpc_queue_call(call);
	}
	read_unlock_bh(&call->state_lock);
}
/*
 * handle resend timer expiry
 * - may not take call->state_lock as this can deadlock against del_timer_sync()
 */
static void rxrpc_resend_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
		rxrpc_queue_call(call);
}
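
/*
 * Why this handler avoids call->state_lock (my reading of the comment
 * above): del_timer_sync() waits for a running expiry handler to return,
 * so a hypothetical interleaving such as
 *
 *	CPU A                                  CPU B (timer softirq)
 *	write_lock(&call->state_lock);         rxrpc_resend_time_expired()
 *	del_timer_sync(&call->resend_timer);     read_lock(&call->state_lock);
 *	  ... waits for the handler ...            ... waits for CPU A ...
 *
 * would never make progress.  Using only atomic bit operations and
 * rxrpc_queue_call() keeps the handler free of that lock.
 */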
/*
 * handle ACK timer expiry
 */
static void rxrpc_ack_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE &&
	    !test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
		rxrpc_queue_call(call);
	read_unlock_bh(&call->state_lock);
}