Commit | Line | Data |
---|---|---|
f69bcbf3 AD |
1 | /* |
2 | * Intel MIC Platform Software Stack (MPSS) | |
3 | * | |
4 | * Copyright(c) 2013 Intel Corporation. | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License, version 2, as | |
8 | * published by the Free Software Foundation. | |
9 | * | |
10 | * This program is distributed in the hope that it will be useful, but | |
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
13 | * General Public License for more details. | |
14 | * | |
15 | * The full GNU General Public License is included in this distribution in | |
16 | * the file called "COPYING". | |
17 | * | |
18 | * Intel MIC Host driver. | |
19 | * | |
20 | */ | |
21 | #include <linux/pci.h> | |
22 | #include <linux/sched.h> | |
23 | #include <linux/uaccess.h> | |
24 | ||
25 | #include <linux/mic_common.h> | |
4aa79961 | 26 | #include "../common/mic_dev.h" |
f69bcbf3 AD |
27 | #include "mic_device.h" |
28 | #include "mic_smpt.h" | |
29 | #include "mic_virtio.h" | |
30 | ||
31 | /* | |
32 | * Initiates the copies across the PCIe bus from card memory to | |
33 | * a user space buffer. | |
34 | */ | |
35 | static int mic_virtio_copy_to_user(struct mic_vdev *mvdev, | |
36 | void __user *ubuf, size_t len, u64 addr) | |
37 | { | |
38 | int err; | |
39 | void __iomem *dbuf = mvdev->mdev->aper.va + addr; | |
40 | /* | |
41 | * We are copying from IO below an should ideally use something | |
42 | * like copy_to_user_fromio(..) if it existed. | |
43 | */ | |
1a928623 | 44 | if (copy_to_user(ubuf, (void __force *)dbuf, len)) { |
f69bcbf3 AD |
45 | err = -EFAULT; |
46 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | |
47 | __func__, __LINE__, err); | |
48 | goto err; | |
49 | } | |
50 | mvdev->in_bytes += len; | |
51 | err = 0; | |
52 | err: | |
53 | return err; | |
54 | } | |
55 | ||
56 | /* | |
57 | * Initiates copies across the PCIe bus from a user space | |
58 | * buffer to card memory. | |
59 | */ | |
60 | static int mic_virtio_copy_from_user(struct mic_vdev *mvdev, | |
61 | void __user *ubuf, size_t len, u64 addr) | |
62 | { | |
63 | int err; | |
64 | void __iomem *dbuf = mvdev->mdev->aper.va + addr; | |
65 | /* | |
66 | * We are copying to IO below and should ideally use something | |
67 | * like copy_from_user_toio(..) if it existed. | |
68 | */ | |
1a928623 | 69 | if (copy_from_user((void __force *)dbuf, ubuf, len)) { |
f69bcbf3 AD |
70 | err = -EFAULT; |
71 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | |
72 | __func__, __LINE__, err); | |
73 | goto err; | |
74 | } | |
75 | mvdev->out_bytes += len; | |
76 | err = 0; | |
77 | err: | |
78 | return err; | |
79 | } | |
80 | ||
81 | #define MIC_VRINGH_READ true | |
82 | ||
83 | /* The function to call to notify the card about added buffers */ | |
84 | static void mic_notify(struct vringh *vrh) | |
85 | { | |
86 | struct mic_vringh *mvrh = container_of(vrh, struct mic_vringh, vrh); | |
87 | struct mic_vdev *mvdev = mvrh->mvdev; | |
88 | s8 db = mvdev->dc->h2c_vdev_db; | |
89 | ||
90 | if (db != -1) | |
91 | mvdev->mdev->ops->send_intr(mvdev->mdev, db); | |
92 | } | |
93 | ||
94 | /* Determine the total number of bytes consumed in a VRINGH KIOV */ | |
95 | static inline u32 mic_vringh_iov_consumed(struct vringh_kiov *iov) | |
96 | { | |
97 | int i; | |
98 | u32 total = iov->consumed; | |
99 | ||
100 | for (i = 0; i < iov->i; i++) | |
101 | total += iov->iov[i].iov_len; | |
102 | return total; | |
103 | } | |
104 | ||
/*
 * Traverse the VRINGH KIOV and issue the APIs to trigger the copies.
 * This API is heavily based on the vringh_iov_xfer(..) implementation
 * in vringh.c. The reason we cannot reuse vringh_iov_pull_kern(..)
 * and vringh_iov_push_kern(..) directly is because there is no
 * way to override the VRINGH xfer(..) routines as of v3.10.
 *
 * @mvdev: device whose PCIe aperture the card buffers are behind
 * @iov: kernel IOV describing the card-side buffers
 * @ubuf: user space buffer to copy to/from
 * @len: number of bytes requested
 * @read: true copies card memory to ubuf, false copies ubuf to card
 * @out_len: set to the number of bytes actually transferred
 *
 * Returns 0 on success or the negative error from the copy helper.
 */
static int mic_vringh_copy(struct mic_vdev *mvdev, struct vringh_kiov *iov,
		void __user *ubuf, size_t len, bool read, size_t *out_len)
{
	int ret = 0;
	size_t partlen, tot_len = 0;

	while (len && iov->i < iov->used) {
		/* Transfer at most the remainder of the current element. */
		partlen = min(iov->iov[iov->i].iov_len, len);
		if (read)
			ret = mic_virtio_copy_to_user(mvdev,
				ubuf, partlen,
				(u64)iov->iov[iov->i].iov_base);
		else
			ret = mic_virtio_copy_from_user(mvdev,
				ubuf, partlen,
				(u64)iov->iov[iov->i].iov_base);
		if (ret) {
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			break;
		}
		/* Advance both the user buffer and the KIOV element. */
		len -= partlen;
		ubuf += partlen;
		tot_len += partlen;
		iov->consumed += partlen;
		iov->iov[iov->i].iov_len -= partlen;
		iov->iov[iov->i].iov_base += partlen;
		if (!iov->iov[iov->i].iov_len) {
			/* Fix up old iov element then increment. */
			iov->iov[iov->i].iov_len = iov->consumed;
			iov->iov[iov->i].iov_base -= iov->consumed;

			iov->consumed = 0;
			iov->i++;
		}
	}
	*out_len = tot_len;
	return ret;
}
151 | ||
/*
 * Use the standard VRINGH infrastructure in the kernel to fetch new
 * descriptors, initiate the copies and update the used ring.
 *
 * Returns <= 0 straight from vringh_getdesc_kern() when no descriptor
 * is available, a negative error on copy failure, and 0 on success.
 */
static int _mic_virtio_copy(struct mic_vdev *mvdev,
	struct mic_copy_desc *copy)
{
	int ret = 0;
	u32 iovcnt = copy->iovcnt;
	struct iovec iov;
	struct iovec __user *u_iov = copy->iov;
	void __user *ubuf = NULL;
	struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx];
	struct vringh_kiov *riov = &mvr->riov;
	struct vringh_kiov *wiov = &mvr->wiov;
	struct vringh *vrh = &mvr->vrh;
	u16 *head = &mvr->head;
	struct mic_vring *vr = &mvr->vring;
	size_t len = 0, out_len;

	copy->out_len = 0;
	/* Fetch a new IOVEC if all previous elements have been processed */
	if (riov->i == riov->used && wiov->i == wiov->used) {
		ret = vringh_getdesc_kern(vrh, riov, wiov,
				head, GFP_KERNEL);
		/* Check if there are available descriptors */
		if (ret <= 0)
			return ret;
	}
	while (iovcnt) {
		if (!len) {
			/* Copy over a new iovec from user space. */
			/*
			 * NOTE(review): a copy_from_user() failure here is
			 * reported as -EINVAL rather than -EFAULT.
			 */
			ret = copy_from_user(&iov, u_iov, sizeof(*u_iov));
			if (ret) {
				ret = -EINVAL;
				dev_err(mic_dev(mvdev), "%s %d err %d\n",
					__func__, __LINE__, ret);
				break;
			}
			len = iov.iov_len;
			ubuf = iov.iov_base;
		}
		/* Issue all the read descriptors first */
		ret = mic_vringh_copy(mvdev, riov, ubuf, len,
			MIC_VRINGH_READ, &out_len);
		if (ret) {
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			break;
		}
		len -= out_len;
		ubuf += out_len;
		copy->out_len += out_len;
		/* Issue the write descriptors next */
		ret = mic_vringh_copy(mvdev, wiov, ubuf, len,
			!MIC_VRINGH_READ, &out_len);
		if (ret) {
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			break;
		}
		len -= out_len;
		ubuf += out_len;
		copy->out_len += out_len;
		if (!len) {
			/* One user space iovec is now completed */
			iovcnt--;
			u_iov++;
		}
		/* Exit loop if all elements in KIOVs have been processed. */
		if (riov->i == riov->used && wiov->i == wiov->used)
			break;
	}
	/*
	 * Update the used ring if a descriptor was available and some data was
	 * copied in/out and the user asked for a used ring update.
	 */
	if (*head != USHRT_MAX && copy->out_len && copy->update_used) {
		u32 total = 0;

		/* Determine the total data consumed */
		total += mic_vringh_iov_consumed(riov);
		total += mic_vringh_iov_consumed(wiov);
		vringh_complete_kern(vrh, *head, total);
		/* USHRT_MAX marks "no descriptor outstanding". */
		*head = USHRT_MAX;
		if (vringh_need_notify_kern(vrh) > 0)
			vringh_notify(vrh);
		vringh_kiov_cleanup(riov);
		vringh_kiov_cleanup(wiov);
		/* Update avail idx for user space */
		vr->info->avail_idx = vrh->last_avail_idx;
	}
	return ret;
}
246 | ||
247 | static inline int mic_verify_copy_args(struct mic_vdev *mvdev, | |
248 | struct mic_copy_desc *copy) | |
249 | { | |
250 | if (copy->vr_idx >= mvdev->dd->num_vq) { | |
251 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | |
252 | __func__, __LINE__, -EINVAL); | |
253 | return -EINVAL; | |
254 | } | |
255 | return 0; | |
256 | } | |
257 | ||
258 | /* Copy a specified number of virtio descriptors in a chain */ | |
259 | int mic_virtio_copy_desc(struct mic_vdev *mvdev, | |
260 | struct mic_copy_desc *copy) | |
261 | { | |
262 | int err; | |
263 | struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx]; | |
264 | ||
265 | err = mic_verify_copy_args(mvdev, copy); | |
266 | if (err) | |
267 | return err; | |
268 | ||
269 | mutex_lock(&mvr->vr_mutex); | |
270 | if (!mic_vdevup(mvdev)) { | |
271 | err = -ENODEV; | |
272 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | |
273 | __func__, __LINE__, err); | |
274 | goto err; | |
275 | } | |
276 | err = _mic_virtio_copy(mvdev, copy); | |
277 | if (err) { | |
278 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | |
279 | __func__, __LINE__, err); | |
280 | } | |
281 | err: | |
282 | mutex_unlock(&mvr->vr_mutex); | |
283 | return err; | |
284 | } | |
285 | ||
/*
 * Finish virtio device initialization after the card publishes its used
 * ring addresses: point each host-side vringh used ring at the card
 * memory through the PCIe aperture, then clear the update flag.
 */
static void mic_virtio_init_post(struct mic_vdev *mvdev)
{
	struct mic_vqconfig *vqconfig = mic_vq_config(mvdev->dd);
	int i;

	for (i = 0; i < mvdev->dd->num_vq; i++) {
		/* A zero used_address means the card never filled it in. */
		if (!le64_to_cpu(vqconfig[i].used_address)) {
			dev_warn(mic_dev(mvdev), "used_address zero??\n");
			continue;
		}
		/* __force: aperture I/O memory accessed as regular memory. */
		mvdev->mvr[i].vrh.vring.used =
			(void __force *)mvdev->mdev->aper.va +
			le64_to_cpu(vqconfig[i].used_address);
	}

	mvdev->dc->used_address_updated = 0;

	dev_dbg(mic_dev(mvdev), "%s: device type %d LINKUP\n",
		__func__, mvdev->virtio_id);
}
306 | ||
/*
 * Reset a virtio device: with every vring mutex held, zero the device
 * status, acknowledge the reset to the card and rewind all vring
 * indices so the rings start empty again.
 */
static inline void mic_virtio_device_reset(struct mic_vdev *mvdev)
{
	int i;

	dev_dbg(mic_dev(mvdev), "%s: status %d device type %d RESET\n",
		__func__, mvdev->dd->status, mvdev->virtio_id);

	for (i = 0; i < mvdev->dd->num_vq; i++)
		/*
		 * Avoid lockdep false positive. The + 1 is for the mic
		 * mutex which is held in the reset devices code path.
		 */
		mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1);

	/* 0 status means "reset" */
	mvdev->dd->status = 0;
	mvdev->dc->vdev_reset = 0;
	/* Tell the card the reset has been processed. */
	mvdev->dc->host_ack = 1;

	for (i = 0; i < mvdev->dd->num_vq; i++) {
		struct vringh *vrh = &mvdev->mvr[i].vrh;
		/* Rewind user-space visible and host-side ring state. */
		mvdev->mvr[i].vring.info->avail_idx = 0;
		vrh->completed = 0;
		vrh->last_avail_idx = 0;
		vrh->last_used_idx = 0;
	}

	for (i = 0; i < mvdev->dd->num_vq; i++)
		mutex_unlock(&mvdev->mvr[i].vr_mutex);
}
337 | ||
338 | void mic_virtio_reset_devices(struct mic_device *mdev) | |
339 | { | |
340 | struct list_head *pos, *tmp; | |
341 | struct mic_vdev *mvdev; | |
342 | ||
343 | dev_dbg(mdev->sdev->parent, "%s\n", __func__); | |
344 | ||
345 | list_for_each_safe(pos, tmp, &mdev->vdev_list) { | |
346 | mvdev = list_entry(pos, struct mic_vdev, list); | |
347 | mic_virtio_device_reset(mvdev); | |
348 | mvdev->poll_wake = 1; | |
349 | wake_up(&mvdev->waitq); | |
350 | } | |
351 | } | |
352 | ||
353 | void mic_bh_handler(struct work_struct *work) | |
354 | { | |
355 | struct mic_vdev *mvdev = container_of(work, struct mic_vdev, | |
356 | virtio_bh_work); | |
357 | ||
358 | if (mvdev->dc->used_address_updated) | |
359 | mic_virtio_init_post(mvdev); | |
360 | ||
361 | if (mvdev->dc->vdev_reset) | |
362 | mic_virtio_device_reset(mvdev); | |
363 | ||
364 | mvdev->poll_wake = 1; | |
365 | wake_up(&mvdev->waitq); | |
366 | } | |
367 | ||
368 | static irqreturn_t mic_virtio_intr_handler(int irq, void *data) | |
369 | { | |
f69bcbf3 AD |
370 | struct mic_vdev *mvdev = data; |
371 | struct mic_device *mdev = mvdev->mdev; | |
372 | ||
df5e4e8b | 373 | mdev->ops->intr_workarounds(mdev); |
f69bcbf3 AD |
374 | schedule_work(&mvdev->virtio_bh_work); |
375 | return IRQ_HANDLED; | |
376 | } | |
377 | ||
/*
 * Push a virtio config space update from user space to the card and
 * wait (bounded) for the card to acknowledge it.
 *
 * @mvdev: the virtio device whose config space is being changed
 * @argp: user pointer to config_len bytes of new config data
 *
 * Returns 0 on success, -EIO if the card has not registered a config
 * doorbell or the device type is invalid, -EFAULT on a bad user copy.
 */
int mic_virtio_config_change(struct mic_vdev *mvdev,
	void __user *argp)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
	int ret = 0, retry, i;
	struct mic_bootparam *bootparam = mvdev->mdev->dp;
	s8 db = bootparam->h2c_config_db;

	/* Hold the mic mutex and every vring mutex across the update. */
	mutex_lock(&mvdev->mdev->mic_mutex);
	for (i = 0; i < mvdev->dd->num_vq; i++)
		/* +1 nesting level accounts for the mic mutex above. */
		mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1);

	if (db == -1 || mvdev->dd->type == -1) {
		ret = -EIO;
		goto exit;
	}

	if (copy_from_user(mic_vq_configspace(mvdev->dd),
		argp, mvdev->dd->config_len)) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -EFAULT);
		ret = -EFAULT;
		goto exit;
	}
	mvdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED;
	mvdev->mdev->ops->send_intr(mvdev->mdev, db);

	/*
	 * Poll for guest_ack for up to 100 * 100ms. NOTE(review): nothing
	 * wakes the on-stack waitqueue, so each wait_event_timeout() runs
	 * to its 100ms timeout and rechecks the condition — confirm this
	 * polling behavior is intended.
	 */
	for (retry = 100; retry--;) {
		ret = wait_event_timeout(wake,
			mvdev->dc->guest_ack, msecs_to_jiffies(100));
		if (ret)
			break;
	}

	dev_dbg(mic_dev(mvdev),
		"%s %d retry: %d\n", __func__, __LINE__, retry);
	mvdev->dc->config_change = 0;
	mvdev->dc->guest_ack = 0;
exit:
	for (i = 0; i < mvdev->dd->num_vq; i++)
		mutex_unlock(&mvdev->mvr[i].vr_mutex);
	mutex_unlock(&mvdev->mdev->mic_mutex);
	return ret;
}
422 | ||
/*
 * Validate a device descriptor from user space and copy it into the
 * first free slot of the device page shared with the card.
 *
 * @mvdev: the virtio device being added
 * @argp: user pointer to the mic_device_desc (plus trailing config)
 * @type: out parameter, the device type saved before it is zeroed
 * @devpage: out parameter, the device page slot the descriptor was
 *	copied into
 *
 * The descriptor type is written as 0 in the device page; the caller
 * publishes the real type only after all initialization completes.
 *
 * Returns 0 on success, -EFAULT/-EINVAL/-ENOMEM on failure.
 */
static int mic_copy_dp_entry(struct mic_vdev *mvdev,
		void __user *argp,
		__u8 *type,
		struct mic_device_desc **devpage)
{
	struct mic_device *mdev = mvdev->mdev;
	struct mic_device_desc dd, *dd_config, *devp;
	struct mic_vqconfig *vqconfig;
	int ret = 0, i;
	bool slot_found = false;

	/* First copy just the fixed-size header to learn the sizes. */
	if (copy_from_user(&dd, argp, sizeof(dd))) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -EFAULT);
		return -EFAULT;
	}

	/* Reject descriptors that exceed the device page limits. */
	if (mic_aligned_desc_size(&dd) > MIC_MAX_DESC_BLK_SIZE ||
		dd.num_vq > MIC_MAX_VRINGS) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -EINVAL);
		return -EINVAL;
	}

	dd_config = kmalloc(mic_desc_size(&dd), GFP_KERNEL);
	if (dd_config == NULL) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -ENOMEM);
		return -ENOMEM;
	}
	/* Now copy the full descriptor including the variable tail. */
	if (copy_from_user(dd_config, argp, mic_desc_size(&dd))) {
		ret = -EFAULT;
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, ret);
		goto exit;
	}

	vqconfig = mic_vq_config(dd_config);
	for (i = 0; i < dd.num_vq; i++) {
		if (le16_to_cpu(vqconfig[i].num) > MIC_MAX_VRING_ENTRIES) {
			ret = -EINVAL;
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto exit;
		}
	}

	/* Find the first free device page entry */
	for (i = sizeof(struct mic_bootparam);
		i < MIC_DP_SIZE - mic_total_desc_size(dd_config);
		i += mic_total_desc_size(devp)) {
		/* devp is assigned before the increment expression uses it. */
		devp = mdev->dp + i;
		/* type 0 = never used, type -1 = previously removed. */
		if (devp->type == 0 || devp->type == -1) {
			slot_found = true;
			break;
		}
	}
	if (!slot_found) {
		ret = -EINVAL;
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, ret);
		goto exit;
	}
	/*
	 * Save off the type before doing the memcpy. Type will be set in the
	 * end after completing all initialization for the new device.
	 */
	*type = dd_config->type;
	dd_config->type = 0;
	memcpy(devp, dd_config, mic_desc_size(dd_config));

	*devpage = devp;
exit:
	kfree(dd_config);
	return ret;
}
499 | ||
500 | static void mic_init_device_ctrl(struct mic_vdev *mvdev, | |
501 | struct mic_device_desc *devpage) | |
502 | { | |
503 | struct mic_device_ctrl *dc; | |
504 | ||
ced2c60f | 505 | dc = (void *)devpage + mic_aligned_desc_size(devpage); |
f69bcbf3 AD |
506 | |
507 | dc->config_change = 0; | |
508 | dc->guest_ack = 0; | |
509 | dc->vdev_reset = 0; | |
510 | dc->host_ack = 0; | |
511 | dc->used_address_updated = 0; | |
512 | dc->c2h_vdev_db = -1; | |
513 | dc->h2c_vdev_db = -1; | |
ced2c60f | 514 | mvdev->dc = dc; |
f69bcbf3 AD |
515 | } |
516 | ||
/*
 * Add a new virtio device: copy the descriptor from user space into the
 * device page, allocate and DMA-map the vrings, set up the host-side
 * vringh instances, request the doorbell interrupt, and finally publish
 * the device type so the card can discover the device.
 *
 * @mvdev: the virtio device being added
 * @argp: user pointer to the device descriptor
 *
 * Returns 0 on success or a negative error; on failure all vrings
 * mapped/allocated so far are unwound.
 */
int mic_virtio_add_device(struct mic_vdev *mvdev,
	void __user *argp)
{
	struct mic_device *mdev = mvdev->mdev;
	struct mic_device_desc *dd = NULL;
	struct mic_vqconfig *vqconfig;
	int vr_size, i, j, ret;
	u8 type = 0;
	s8 db;
	char irqname[10];
	struct mic_bootparam *bootparam = mdev->dp;
	u16 num;
	dma_addr_t vr_addr;

	mutex_lock(&mdev->mic_mutex);

	ret = mic_copy_dp_entry(mvdev, argp, &type, &dd);
	if (ret) {
		mutex_unlock(&mdev->mic_mutex);
		return ret;
	}

	mic_init_device_ctrl(mvdev, dd);

	mvdev->dd = dd;
	mvdev->virtio_id = type;
	vqconfig = mic_vq_config(dd);
	INIT_WORK(&mvdev->virtio_bh_work, mic_bh_handler);

	for (i = 0; i < dd->num_vq; i++) {
		struct mic_vringh *mvr = &mvdev->mvr[i];
		struct mic_vring *vr = &mvdev->mvr[i].vring;
		num = le16_to_cpu(vqconfig[i].num);
		mutex_init(&mvr->vr_mutex);
		/* Ring plus trailing _mic_vring_info, page aligned. */
		vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) +
			sizeof(struct _mic_vring_info));
		vr->va = (void *)
			__get_free_pages(GFP_KERNEL | __GFP_ZERO,
				get_order(vr_size));
		if (!vr->va) {
			ret = -ENOMEM;
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto err;
		}
		vr->len = vr_size;
		vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN);
		vr->info->magic = cpu_to_le32(MIC_MAGIC + mvdev->virtio_id + i);
		/* Map the ring so the card can reach it across PCIe. */
		vr_addr = mic_map_single(mdev, vr->va, vr_size);
		if (mic_map_error(vr_addr)) {
			free_pages((unsigned long)vr->va, get_order(vr_size));
			ret = -ENOMEM;
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto err;
		}
		vqconfig[i].address = cpu_to_le64(vr_addr);

		vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN);
		ret = vringh_init_kern(&mvr->vrh,
			*(u32 *)mic_vq_features(mvdev->dd), num, false,
			vr->vr.desc, vr->vr.avail, vr->vr.used);
		if (ret) {
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto err;
		}
		vringh_kiov_init(&mvr->riov, NULL, 0);
		vringh_kiov_init(&mvr->wiov, NULL, 0);
		/* USHRT_MAX marks "no descriptor outstanding". */
		mvr->head = USHRT_MAX;
		mvr->mvdev = mvdev;
		mvr->vrh.notify = mic_notify;
		dev_dbg(mdev->sdev->parent,
			"%s %d index %d va %p info %p vr_size 0x%x\n",
			__func__, __LINE__, i, vr->va, vr->info, vr_size);
	}

	snprintf(irqname, sizeof(irqname), "mic%dvirtio%d", mdev->id,
		mvdev->virtio_id);
	mvdev->virtio_db = mic_next_db(mdev);
	mvdev->virtio_cookie = mic_request_irq(mdev, mic_virtio_intr_handler,
			irqname, mvdev, mvdev->virtio_db, MIC_INTR_DB);
	if (IS_ERR(mvdev->virtio_cookie)) {
		ret = PTR_ERR(mvdev->virtio_cookie);
		dev_dbg(mdev->sdev->parent, "request irq failed\n");
		goto err;
	}

	mvdev->dc->c2h_vdev_db = mvdev->virtio_db;

	list_add_tail(&mvdev->list, &mdev->vdev_list);
	/*
	 * Order the type update with previous stores. This write barrier
	 * is paired with the corresponding read barrier before the uncached
	 * system memory read of the type, on the card while scanning the
	 * device page.
	 */
	smp_wmb();
	dd->type = type;

	dev_dbg(mdev->sdev->parent, "Added virtio device id %d\n", dd->type);

	/* Kick the card so it rescans the device page. */
	db = bootparam->h2c_config_db;
	if (db != -1)
		mdev->ops->send_intr(mdev, db);
	mutex_unlock(&mdev->mic_mutex);
	return 0;
err:
	/* Unwind only the vrings fully set up before the failure (j < i). */
	vqconfig = mic_vq_config(dd);
	for (j = 0; j < i; j++) {
		struct mic_vringh *mvr = &mvdev->mvr[j];
		mic_unmap_single(mdev, le64_to_cpu(vqconfig[j].address),
			mvr->vring.len);
		free_pages((unsigned long)mvr->vring.va,
			get_order(mvr->vring.len));
	}
	mutex_unlock(&mdev->mic_mutex);
	return ret;
}
636 | ||
/*
 * Remove a virtio device: request a hot remove from the card (if a
 * config doorbell is registered) and wait bounded for its ack, then
 * free the interrupt, tear down the vrings and unlink the device from
 * the device list, finally marking the device page slot as removed.
 */
void mic_virtio_del_device(struct mic_vdev *mvdev)
{
	struct list_head *pos, *tmp;
	struct mic_vdev *tmp_mvdev;
	struct mic_device *mdev = mvdev->mdev;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
	int i, ret, retry;
	struct mic_vqconfig *vqconfig;
	struct mic_bootparam *bootparam = mdev->dp;
	s8 db;

	mutex_lock(&mdev->mic_mutex);
	db = bootparam->h2c_config_db;
	/* No config doorbell: card cannot be told, skip the handshake. */
	if (db == -1)
		goto skip_hot_remove;
	dev_dbg(mdev->sdev->parent,
		"Requesting hot remove id %d\n", mvdev->virtio_id);
	mvdev->dc->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE;
	mdev->ops->send_intr(mdev, db);
	/*
	 * Poll for guest_ack for up to 100 * 100ms. NOTE(review): nothing
	 * wakes the on-stack waitqueue, so each wait_event_timeout() runs
	 * to its timeout and rechecks the condition — confirm intended.
	 */
	for (retry = 100; retry--;) {
		ret = wait_event_timeout(wake,
			mvdev->dc->guest_ack, msecs_to_jiffies(100));
		if (ret)
			break;
	}
	dev_dbg(mdev->sdev->parent,
		"Device id %d config_change %d guest_ack %d retry %d\n",
		mvdev->virtio_id, mvdev->dc->config_change,
		mvdev->dc->guest_ack, retry);
	mvdev->dc->config_change = 0;
	mvdev->dc->guest_ack = 0;
skip_hot_remove:
	/* Stop interrupts and any in-flight bottom half before teardown. */
	mic_free_irq(mdev, mvdev->virtio_cookie, mvdev);
	flush_work(&mvdev->virtio_bh_work);
	vqconfig = mic_vq_config(mvdev->dd);
	for (i = 0; i < mvdev->dd->num_vq; i++) {
		struct mic_vringh *mvr = &mvdev->mvr[i];
		vringh_kiov_cleanup(&mvr->riov);
		vringh_kiov_cleanup(&mvr->wiov);
		mic_unmap_single(mdev, le64_to_cpu(vqconfig[i].address),
			mvr->vring.len);
		free_pages((unsigned long)mvr->vring.va,
			get_order(mvr->vring.len));
	}

	list_for_each_safe(pos, tmp, &mdev->vdev_list) {
		tmp_mvdev = list_entry(pos, struct mic_vdev, list);
		if (tmp_mvdev == mvdev) {
			list_del(pos);
			dev_dbg(mdev->sdev->parent,
				"Removing virtio device id %d\n",
				mvdev->virtio_id);
			break;
		}
	}
	/*
	 * Order the type update with previous stores. This write barrier
	 * is paired with the corresponding read barrier before the uncached
	 * system memory read of the type, on the card while scanning the
	 * device page.
	 */
	smp_wmb();
	/* type -1 marks the device page slot as removed/reusable. */
	mvdev->dd->type = -1;
	mutex_unlock(&mdev->mic_mutex);
}