/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 */
6 #include <linux/stddef.h>
7 #include <linux/spinlock.h>
8 #include <linux/slab.h>
9 #include <net/caif/cfpkt.h>
10 #include <net/caif/cfmuxl.h>
11 #include <net/caif/cfsrvl.h>
12 #include <net/caif/cffrml.h>
14 #define container_obj(layr) container_of(layr, struct cfmuxl, layer)
16 #define CAIF_CTRL_CHANNEL 0
17 #define UP_CACHE_SIZE 8
18 #define DN_CACHE_SIZE 8
22 struct list_head srvl_list
;
23 struct list_head frml_list
;
24 struct cflayer
*up_cache
[UP_CACHE_SIZE
];
25 struct cflayer
*dn_cache
[DN_CACHE_SIZE
];
27 * Set when inserting or removing downwards layers.
29 spinlock_t transmit_lock
;
32 * Set when inserting or removing upwards layers.
34 spinlock_t receive_lock
;
38 static int cfmuxl_receive(struct cflayer
*layr
, struct cfpkt
*pkt
);
39 static int cfmuxl_transmit(struct cflayer
*layr
, struct cfpkt
*pkt
);
40 static void cfmuxl_ctrlcmd(struct cflayer
*layr
, enum caif_ctrlcmd ctrl
,
42 static struct cflayer
*get_up(struct cfmuxl
*muxl
, u16 id
);
44 struct cflayer
*cfmuxl_create(void)
46 struct cfmuxl
*this = kmalloc(sizeof(struct cfmuxl
), GFP_ATOMIC
);
49 memset(this, 0, sizeof(*this));
50 this->layer
.receive
= cfmuxl_receive
;
51 this->layer
.transmit
= cfmuxl_transmit
;
52 this->layer
.ctrlcmd
= cfmuxl_ctrlcmd
;
53 INIT_LIST_HEAD(&this->srvl_list
);
54 INIT_LIST_HEAD(&this->frml_list
);
55 spin_lock_init(&this->transmit_lock
);
56 spin_lock_init(&this->receive_lock
);
57 snprintf(this->layer
.name
, CAIF_LAYER_NAME_SZ
, "mux");
61 int cfmuxl_set_uplayer(struct cflayer
*layr
, struct cflayer
*up
, u8 linkid
)
63 struct cfmuxl
*muxl
= container_obj(layr
);
64 spin_lock(&muxl
->receive_lock
);
65 list_add(&up
->node
, &muxl
->srvl_list
);
66 spin_unlock(&muxl
->receive_lock
);
70 bool cfmuxl_is_phy_inuse(struct cflayer
*layr
, u8 phyid
)
72 struct list_head
*node
;
73 struct cflayer
*layer
;
74 struct cfmuxl
*muxl
= container_obj(layr
);
76 spin_lock(&muxl
->receive_lock
);
78 list_for_each(node
, &muxl
->srvl_list
) {
79 layer
= list_entry(node
, struct cflayer
, node
);
80 if (cfsrvl_phyid_match(layer
, phyid
)) {
86 spin_unlock(&muxl
->receive_lock
);
90 u8
cfmuxl_get_phyid(struct cflayer
*layr
, u8 channel_id
)
94 struct cfmuxl
*muxl
= container_obj(layr
);
95 spin_lock(&muxl
->receive_lock
);
96 up
= get_up(muxl
, channel_id
);
98 phyid
= cfsrvl_getphyid(up
);
101 spin_unlock(&muxl
->receive_lock
);
105 int cfmuxl_set_dnlayer(struct cflayer
*layr
, struct cflayer
*dn
, u8 phyid
)
107 struct cfmuxl
*muxl
= (struct cfmuxl
*) layr
;
108 spin_lock(&muxl
->transmit_lock
);
109 list_add(&dn
->node
, &muxl
->frml_list
);
110 spin_unlock(&muxl
->transmit_lock
);
114 static struct cflayer
*get_from_id(struct list_head
*list
, u16 id
)
116 struct list_head
*node
;
117 struct cflayer
*layer
;
118 list_for_each(node
, list
) {
119 layer
= list_entry(node
, struct cflayer
, node
);
126 struct cflayer
*cfmuxl_remove_dnlayer(struct cflayer
*layr
, u8 phyid
)
128 struct cfmuxl
*muxl
= container_obj(layr
);
130 spin_lock(&muxl
->transmit_lock
);
131 memset(muxl
->dn_cache
, 0, sizeof(muxl
->dn_cache
));
132 dn
= get_from_id(&muxl
->frml_list
, phyid
);
134 spin_unlock(&muxl
->transmit_lock
);
138 caif_assert(dn
!= NULL
);
139 spin_unlock(&muxl
->transmit_lock
);
143 /* Invariant: lock is taken */
144 static struct cflayer
*get_up(struct cfmuxl
*muxl
, u16 id
)
147 int idx
= id
% UP_CACHE_SIZE
;
148 up
= muxl
->up_cache
[idx
];
149 if (up
== NULL
|| up
->id
!= id
) {
150 up
= get_from_id(&muxl
->srvl_list
, id
);
151 muxl
->up_cache
[idx
] = up
;
156 /* Invariant: lock is taken */
157 static struct cflayer
*get_dn(struct cfmuxl
*muxl
, struct dev_info
*dev_info
)
160 int idx
= dev_info
->id
% DN_CACHE_SIZE
;
161 dn
= muxl
->dn_cache
[idx
];
162 if (dn
== NULL
|| dn
->id
!= dev_info
->id
) {
163 dn
= get_from_id(&muxl
->frml_list
, dev_info
->id
);
164 muxl
->dn_cache
[idx
] = dn
;
169 struct cflayer
*cfmuxl_remove_uplayer(struct cflayer
*layr
, u8 id
)
172 struct cfmuxl
*muxl
= container_obj(layr
);
173 spin_lock(&muxl
->receive_lock
);
174 up
= get_up(muxl
, id
);
175 memset(muxl
->up_cache
, 0, sizeof(muxl
->up_cache
));
177 spin_unlock(&muxl
->receive_lock
);
181 static int cfmuxl_receive(struct cflayer
*layr
, struct cfpkt
*pkt
)
184 struct cfmuxl
*muxl
= container_obj(layr
);
187 if (cfpkt_extr_head(pkt
, &id
, 1) < 0) {
188 pr_err("CAIF: %s(): erroneous Caif Packet\n", __func__
);
193 spin_lock(&muxl
->receive_lock
);
194 up
= get_up(muxl
, id
);
195 spin_unlock(&muxl
->receive_lock
);
197 pr_info("CAIF: %s():Received data on unknown link ID = %d "
198 "(0x%x) up == NULL", __func__
, id
, id
);
201 * Don't return ERROR, since modem misbehaves and sends out
202 * flow on before linksetup response.
204 return /* CFGLU_EPROT; */ 0;
207 ret
= up
->receive(up
, pkt
);
211 static int cfmuxl_transmit(struct cflayer
*layr
, struct cfpkt
*pkt
)
214 struct cfmuxl
*muxl
= container_obj(layr
);
217 struct caif_payload_info
*info
= cfpkt_info(pkt
);
218 dn
= get_dn(muxl
, cfpkt_info(pkt
)->dev_info
);
220 pr_warning("CAIF: %s(): Send data on unknown phy "
222 __func__
, info
->dev_info
->id
, info
->dev_info
->id
);
226 linkid
= info
->channel_id
;
227 cfpkt_add_head(pkt
, &linkid
, 1);
228 ret
= dn
->transmit(dn
, pkt
);
229 /* Remove MUX protocol header upon error. */
231 cfpkt_extr_head(pkt
, &linkid
, 1);
235 static void cfmuxl_ctrlcmd(struct cflayer
*layr
, enum caif_ctrlcmd ctrl
,
238 struct cfmuxl
*muxl
= container_obj(layr
);
239 struct list_head
*node
;
240 struct cflayer
*layer
;
241 list_for_each(node
, &muxl
->srvl_list
) {
242 layer
= list_entry(node
, struct cflayer
, node
);
243 if (cfsrvl_phyid_match(layer
, phyid
))
244 layer
->ctrlcmd(layer
, ctrl
, phyid
);