+/*
+ * Return @req's tag to mux @m's tag pool and free the request struct.
+ * NOTE: reads req->tag before kfree(req) — do not reorder.  Callers
+ * free req->tcall / req->rcall themselves (see p9_mux_flush_cb).
+ */
+static void p9_mux_free_request(struct p9_conn *m, struct p9_req *req)
+{
+ p9_mux_put_tag(m, req->tag);
+ kfree(req);
+}
+
+static void p9_conn_rpc_cb(struct p9_req *req);
+
+/*
+ * Completion callback for a TFLUSH request.
+ *
+ * Looks up the original (flushed) request on the mux's pending list by
+ * the oldtag carried in the flush T-message, detaches it, marks it
+ * Flushed, and completes it through p9_conn_rpc_cb so its waiter is
+ * woken.  Finally releases the flush request's own call buffers and the
+ * flush request itself.
+ *
+ * NOTE(review): freq->tcall is dereferenced without a NULL check —
+ * presumably a flush request always carries a tcall; confirm at the
+ * call sites that build TFLUSH requests.
+ */
+static void p9_mux_flush_cb(struct p9_req *freq)
+{
+ int tag;
+ struct p9_conn *m = freq->m;
+ struct p9_req *req, *rreq, *rptr;
+
+ P9_DPRINTK(P9_DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m,
+ freq->tcall, freq->rcall, freq->err,
+ freq->tcall->params.tflush.oldtag);
+
+ /* Find and unlink the request being flushed; m->lock guards req_list. */
+ spin_lock(&m->lock);
+ tag = freq->tcall->params.tflush.oldtag;
+ req = NULL;
+ list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
+  if (rreq->tag == tag) {
+   req = rreq;
+   list_del(&req->req_list);
+   break;
+  }
+ }
+ spin_unlock(&m->lock);
+
+ if (req) {
+  /* Mark Flushed under req->lock, then complete it; p9_conn_rpc_cb
+   * will translate the Flushed state into -ERESTARTSYS and wake the
+   * waiter (req is not a TFLUSH, so no recursion back here). */
+  spin_lock(&req->lock);
+  req->flush = Flushed;
+  spin_unlock(&req->lock);
+
+  p9_conn_rpc_cb(req);
+ }
+
+ /* The flush request itself is done: free its buffers and its tag. */
+ kfree(freq->tcall);
+ kfree(freq->rcall);
+ p9_mux_free_request(m, freq);
+}
+
+/*
+ * Generic RPC completion callback.
+ *
+ * A completed TFLUSH is handed to p9_mux_flush_cb (which completes the
+ * flushed request and frees the flush request).  Any other request is
+ * finished here: if it was flushed (req->flush != None) and has no
+ * prior error, its error is set to -ERESTARTSYS, then the sleeper on
+ * req->wqueue is woken.
+ *
+ * NOTE(review): req->flush is read and req->err written without taking
+ * req->lock here — presumably safe because the request has already been
+ * removed from the pending list by this point; verify against the
+ * callers.
+ */
+static void p9_conn_rpc_cb(struct p9_req *req)
+{
+ P9_DPRINTK(P9_DEBUG_MUX, "req %p\n", req);
+
+ if (req->tcall->id == P9_TFLUSH) { /* flush callback */
+  P9_DPRINTK(P9_DEBUG_MUX, "flush req %p\n", req);
+  p9_mux_flush_cb(req);
+ } else { /* normal wakeup path */
+  P9_DPRINTK(P9_DEBUG_MUX, "normal req %p\n", req);
+  if (req->flush != None && !req->err)
+   req->err = -ERESTARTSYS;
+
+  wake_up(&req->wqueue);
+ }
+}
+