caif: Bugfix: add NULL pointer checks before calling functions.
[deliverable/linux.git] / net / caif / cfmuxl.c
/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfmuxl.h>
#include <net/caif/cfsrvl.h>
#include <net/caif/cffrml.h>

#define container_obj(layr) container_of(layr, struct cfmuxl, layer)

#define CAIF_CTRL_CHANNEL 0
#define UP_CACHE_SIZE 8
#define DN_CACHE_SIZE 8

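/*
 * The mux layer sits between the service layers (upwards, keyed by
 * channel/link id) and the framing/phy layers (downwards, keyed by
 * phy id). Small per-id caches avoid walking the RCU lists for every
 * packet.
 */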
struct cfmuxl {
	struct cflayer layer;
	struct list_head srvl_list;
	struct list_head frml_list;
	struct cflayer *up_cache[UP_CACHE_SIZE];
	struct cflayer *dn_cache[DN_CACHE_SIZE];
	/*
	 * Set when inserting or removing downwards layers.
	 */
	spinlock_t transmit_lock;

	/*
	 * Set when inserting or removing upwards layers.
	 */
	spinlock_t receive_lock;
};

static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt);
static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt);
static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
			   int phyid);
static struct cflayer *get_up(struct cfmuxl *muxl, u16 id);

struct cflayer *cfmuxl_create(void)
{
	struct cfmuxl *this = kzalloc(sizeof(struct cfmuxl), GFP_ATOMIC);

	if (!this)
		return NULL;
	this->layer.receive = cfmuxl_receive;
	this->layer.transmit = cfmuxl_transmit;
	this->layer.ctrlcmd = cfmuxl_ctrlcmd;
	INIT_LIST_HEAD(&this->srvl_list);
	INIT_LIST_HEAD(&this->frml_list);
	spin_lock_init(&this->transmit_lock);
	spin_lock_init(&this->receive_lock);
	snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "mux");
	return &this->layer;
}

int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid)
{
	struct cfmuxl *muxl = container_obj(layr);

	spin_lock_bh(&muxl->receive_lock);
	list_add_rcu(&up->node, &muxl->srvl_list);
	spin_unlock_bh(&muxl->receive_lock);
	return 0;
}

int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid)
{
	struct cfmuxl *muxl = container_obj(layr);

	spin_lock_bh(&muxl->transmit_lock);
	list_add_rcu(&dn->node, &muxl->frml_list);
	spin_unlock_bh(&muxl->transmit_lock);
	return 0;
}

static struct cflayer *get_from_id(struct list_head *list, u16 id)
{
	struct cflayer *lyr;

	list_for_each_entry_rcu(lyr, list, node) {
		if (lyr->id == id)
			return lyr;
	}

	return NULL;
}

struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid)
{
	struct cfmuxl *muxl = container_obj(layr);
	struct cflayer *dn;
	int idx = phyid % DN_CACHE_SIZE;

	spin_lock_bh(&muxl->transmit_lock);
	rcu_assign_pointer(muxl->dn_cache[idx], NULL);
	dn = get_from_id(&muxl->frml_list, phyid);
	if (dn == NULL)
		goto out;

	list_del_rcu(&dn->node);
	caif_assert(dn != NULL);
out:
	spin_unlock_bh(&muxl->transmit_lock);
	return dn;
}

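/*
 * get_up()/get_dn() first consult the per-id cache under the RCU read
 * lock; on a miss the corresponding list is searched under the
 * receive/transmit lock and the cache entry is refreshed.
 */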
static struct cflayer *get_up(struct cfmuxl *muxl, u16 id)
{
	struct cflayer *up;
	int idx = id % UP_CACHE_SIZE;

	up = rcu_dereference(muxl->up_cache[idx]);
	if (up == NULL || up->id != id) {
		spin_lock_bh(&muxl->receive_lock);
		up = get_from_id(&muxl->srvl_list, id);
		rcu_assign_pointer(muxl->up_cache[idx], up);
		spin_unlock_bh(&muxl->receive_lock);
	}
	return up;
}

static struct cflayer *get_dn(struct cfmuxl *muxl, struct dev_info *dev_info)
{
	struct cflayer *dn;
	int idx = dev_info->id % DN_CACHE_SIZE;

	dn = rcu_dereference(muxl->dn_cache[idx]);
	if (dn == NULL || dn->id != dev_info->id) {
		spin_lock_bh(&muxl->transmit_lock);
		dn = get_from_id(&muxl->frml_list, dev_info->id);
		rcu_assign_pointer(muxl->dn_cache[idx], dn);
		spin_unlock_bh(&muxl->transmit_lock);
	}
	return dn;
}

struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id)
{
	struct cflayer *up;
	struct cfmuxl *muxl = container_obj(layr);
	int idx = id % UP_CACHE_SIZE;

	spin_lock_bh(&muxl->receive_lock);
	up = get_from_id(&muxl->srvl_list, id);
	if (up == NULL)
		goto out;

	rcu_assign_pointer(muxl->up_cache[idx], NULL);
	list_del_rcu(&up->node);
out:
	spin_unlock_bh(&muxl->receive_lock);
	return up;
}

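/*
 * Receive path: the first byte of the packet carries the link id.
 * The packet is dispatched to the matching upwards layer; a service
 * reference is taken so the RCU read lock can be dropped before
 * calling up->receive().
 */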
static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt)
{
	int ret;
	struct cfmuxl *muxl = container_obj(layr);
	u8 id;
	struct cflayer *up;

	if (cfpkt_extr_head(pkt, &id, 1) < 0) {
		pr_err("erroneous CAIF packet\n");
		cfpkt_destroy(pkt);
		return -EPROTO;
	}

	rcu_read_lock();
	up = get_up(muxl, id);

	if (up == NULL) {
		pr_debug("Received data on unknown link ID = %d (0x%x),"
			 " up == NULL\n", id, id);
		cfpkt_destroy(pkt);
		/*
		 * Don't return an error, since the modem misbehaves and
		 * sends flow-on before the link-setup response.
		 */
		rcu_read_unlock();
		return /* CFGLU_EPROT; */ 0;
	}

	/* We can't hold the RCU read lock during receive, so take a ref count instead */
	cfsrvl_get(up);
	rcu_read_unlock();

	ret = up->receive(up, pkt);

	cfsrvl_put(up);
	return ret;
}

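/*
 * Transmit path: the downwards (framing/phy) layer is looked up from
 * the packet's dev_info, the channel id is prepended as the mux header
 * byte, and a framing-layer reference is held across dn->transmit().
 */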
static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt)
{
	struct cfmuxl *muxl = container_obj(layr);
	int err;
	u8 linkid;
	struct cflayer *dn;
	struct caif_payload_info *info = cfpkt_info(pkt);

	BUG_ON(!info);

	rcu_read_lock();

	dn = get_dn(muxl, info->dev_info);
	if (dn == NULL) {
		pr_debug("Send data on unknown phy ID = %d (0x%x)\n",
			 info->dev_info->id, info->dev_info->id);
		rcu_read_unlock();
		cfpkt_destroy(pkt);
		return -ENOTCONN;
	}

	info->hdr_len += 1;
	linkid = info->channel_id;
	cfpkt_add_head(pkt, &linkid, 1);

	/* We can't hold the RCU read lock during transmit, so take a ref count instead */
	cffrml_hold(dn);

	rcu_read_unlock();

	err = dn->transmit(dn, pkt);

	cffrml_put(dn);
	return err;
}

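/*
 * Control commands are broadcast to every service layer whose phy id
 * matches; the callbacks run under the RCU read lock and must not block.
 */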
static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
			   int phyid)
{
	struct cfmuxl *muxl = container_obj(layr);
	struct cflayer *layer;

	rcu_read_lock();
	list_for_each_entry_rcu(layer, &muxl->srvl_list, node) {
		if (cfsrvl_phyid_match(layer, phyid) && layer->ctrlcmd)
			/* NOTE: ctrlcmd is not allowed to block */
			layer->ctrlcmd(layer, ctrl, phyid);
	}
	rcu_read_unlock();
}