/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2013 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Intel MIC Host driver.
 *
 */
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include <linux/mic_common.h>
#include "../common/mic_dev.h"
#include "mic_device.h"
#include "mic_smpt.h"
#include "mic_virtio.h"
31 | /* | |
32 | * Initiates the copies across the PCIe bus from card memory to | |
33 | * a user space buffer. | |
34 | */ | |
35 | static int mic_virtio_copy_to_user(struct mic_vdev *mvdev, | |
36 | void __user *ubuf, size_t len, u64 addr) | |
37 | { | |
38 | int err; | |
39 | void __iomem *dbuf = mvdev->mdev->aper.va + addr; | |
40 | /* | |
41 | * We are copying from IO below an should ideally use something | |
42 | * like copy_to_user_fromio(..) if it existed. | |
43 | */ | |
1a928623 | 44 | if (copy_to_user(ubuf, (void __force *)dbuf, len)) { |
f69bcbf3 AD |
45 | err = -EFAULT; |
46 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | |
47 | __func__, __LINE__, err); | |
48 | goto err; | |
49 | } | |
50 | mvdev->in_bytes += len; | |
51 | err = 0; | |
52 | err: | |
53 | return err; | |
54 | } | |
55 | ||
56 | /* | |
57 | * Initiates copies across the PCIe bus from a user space | |
58 | * buffer to card memory. | |
59 | */ | |
60 | static int mic_virtio_copy_from_user(struct mic_vdev *mvdev, | |
61 | void __user *ubuf, size_t len, u64 addr) | |
62 | { | |
63 | int err; | |
64 | void __iomem *dbuf = mvdev->mdev->aper.va + addr; | |
65 | /* | |
66 | * We are copying to IO below and should ideally use something | |
67 | * like copy_from_user_toio(..) if it existed. | |
68 | */ | |
1a928623 | 69 | if (copy_from_user((void __force *)dbuf, ubuf, len)) { |
f69bcbf3 AD |
70 | err = -EFAULT; |
71 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | |
72 | __func__, __LINE__, err); | |
73 | goto err; | |
74 | } | |
75 | mvdev->out_bytes += len; | |
76 | err = 0; | |
77 | err: | |
78 | return err; | |
79 | } | |
80 | ||
81 | #define MIC_VRINGH_READ true | |
82 | ||
83 | /* The function to call to notify the card about added buffers */ | |
84 | static void mic_notify(struct vringh *vrh) | |
85 | { | |
86 | struct mic_vringh *mvrh = container_of(vrh, struct mic_vringh, vrh); | |
87 | struct mic_vdev *mvdev = mvrh->mvdev; | |
88 | s8 db = mvdev->dc->h2c_vdev_db; | |
89 | ||
90 | if (db != -1) | |
91 | mvdev->mdev->ops->send_intr(mvdev->mdev, db); | |
92 | } | |
93 | ||
94 | /* Determine the total number of bytes consumed in a VRINGH KIOV */ | |
95 | static inline u32 mic_vringh_iov_consumed(struct vringh_kiov *iov) | |
96 | { | |
97 | int i; | |
98 | u32 total = iov->consumed; | |
99 | ||
100 | for (i = 0; i < iov->i; i++) | |
101 | total += iov->iov[i].iov_len; | |
102 | return total; | |
103 | } | |
104 | ||
105 | /* | |
106 | * Traverse the VRINGH KIOV and issue the APIs to trigger the copies. | |
107 | * This API is heavily based on the vringh_iov_xfer(..) implementation | |
108 | * in vringh.c. The reason we cannot reuse vringh_iov_pull_kern(..) | |
109 | * and vringh_iov_push_kern(..) directly is because there is no | |
110 | * way to override the VRINGH xfer(..) routines as of v3.10. | |
111 | */ | |
112 | static int mic_vringh_copy(struct mic_vdev *mvdev, struct vringh_kiov *iov, | |
113 | void __user *ubuf, size_t len, bool read, size_t *out_len) | |
114 | { | |
115 | int ret = 0; | |
116 | size_t partlen, tot_len = 0; | |
117 | ||
118 | while (len && iov->i < iov->used) { | |
119 | partlen = min(iov->iov[iov->i].iov_len, len); | |
120 | if (read) | |
121 | ret = mic_virtio_copy_to_user(mvdev, | |
122 | ubuf, partlen, | |
123 | (u64)iov->iov[iov->i].iov_base); | |
124 | else | |
125 | ret = mic_virtio_copy_from_user(mvdev, | |
126 | ubuf, partlen, | |
127 | (u64)iov->iov[iov->i].iov_base); | |
128 | if (ret) { | |
129 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | |
130 | __func__, __LINE__, ret); | |
131 | break; | |
132 | } | |
133 | len -= partlen; | |
134 | ubuf += partlen; | |
135 | tot_len += partlen; | |
136 | iov->consumed += partlen; | |
137 | iov->iov[iov->i].iov_len -= partlen; | |
138 | iov->iov[iov->i].iov_base += partlen; | |
139 | if (!iov->iov[iov->i].iov_len) { | |
140 | /* Fix up old iov element then increment. */ | |
141 | iov->iov[iov->i].iov_len = iov->consumed; | |
142 | iov->iov[iov->i].iov_base -= iov->consumed; | |
143 | ||
144 | iov->consumed = 0; | |
145 | iov->i++; | |
146 | } | |
147 | } | |
148 | *out_len = tot_len; | |
149 | return ret; | |
150 | } | |
151 | ||
152 | /* | |
153 | * Use the standard VRINGH infrastructure in the kernel to fetch new | |
154 | * descriptors, initiate the copies and update the used ring. | |
155 | */ | |
156 | static int _mic_virtio_copy(struct mic_vdev *mvdev, | |
157 | struct mic_copy_desc *copy) | |
158 | { | |
159 | int ret = 0, iovcnt = copy->iovcnt; | |
160 | struct iovec iov; | |
161 | struct iovec __user *u_iov = copy->iov; | |
162 | void __user *ubuf = NULL; | |
163 | struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx]; | |
164 | struct vringh_kiov *riov = &mvr->riov; | |
165 | struct vringh_kiov *wiov = &mvr->wiov; | |
166 | struct vringh *vrh = &mvr->vrh; | |
167 | u16 *head = &mvr->head; | |
168 | struct mic_vring *vr = &mvr->vring; | |
169 | size_t len = 0, out_len; | |
170 | ||
171 | copy->out_len = 0; | |
172 | /* Fetch a new IOVEC if all previous elements have been processed */ | |
173 | if (riov->i == riov->used && wiov->i == wiov->used) { | |
174 | ret = vringh_getdesc_kern(vrh, riov, wiov, | |
175 | head, GFP_KERNEL); | |
176 | /* Check if there are available descriptors */ | |
177 | if (ret <= 0) | |
178 | return ret; | |
179 | } | |
180 | while (iovcnt) { | |
181 | if (!len) { | |
182 | /* Copy over a new iovec from user space. */ | |
183 | ret = copy_from_user(&iov, u_iov, sizeof(*u_iov)); | |
184 | if (ret) { | |
185 | ret = -EINVAL; | |
186 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | |
187 | __func__, __LINE__, ret); | |
188 | break; | |
189 | } | |
190 | len = iov.iov_len; | |
191 | ubuf = iov.iov_base; | |
192 | } | |
193 | /* Issue all the read descriptors first */ | |
194 | ret = mic_vringh_copy(mvdev, riov, ubuf, len, | |
195 | MIC_VRINGH_READ, &out_len); | |
196 | if (ret) { | |
197 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | |
ced2c60f | 198 | __func__, __LINE__, ret); |
f69bcbf3 AD |
199 | break; |
200 | } | |
201 | len -= out_len; | |
202 | ubuf += out_len; | |
203 | copy->out_len += out_len; | |
204 | /* Issue the write descriptors next */ | |
205 | ret = mic_vringh_copy(mvdev, wiov, ubuf, len, | |
206 | !MIC_VRINGH_READ, &out_len); | |
207 | if (ret) { | |
208 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | |
ced2c60f | 209 | __func__, __LINE__, ret); |
f69bcbf3 AD |
210 | break; |
211 | } | |
212 | len -= out_len; | |
213 | ubuf += out_len; | |
214 | copy->out_len += out_len; | |
215 | if (!len) { | |
216 | /* One user space iovec is now completed */ | |
217 | iovcnt--; | |
218 | u_iov++; | |
219 | } | |
220 | /* Exit loop if all elements in KIOVs have been processed. */ | |
221 | if (riov->i == riov->used && wiov->i == wiov->used) | |
222 | break; | |
223 | } | |
224 | /* | |
225 | * Update the used ring if a descriptor was available and some data was | |
226 | * copied in/out and the user asked for a used ring update. | |
227 | */ | |
ced2c60f | 228 | if (*head != USHRT_MAX && copy->out_len && copy->update_used) { |
f69bcbf3 AD |
229 | u32 total = 0; |
230 | ||
231 | /* Determine the total data consumed */ | |
232 | total += mic_vringh_iov_consumed(riov); | |
233 | total += mic_vringh_iov_consumed(wiov); | |
234 | vringh_complete_kern(vrh, *head, total); | |
235 | *head = USHRT_MAX; | |
236 | if (vringh_need_notify_kern(vrh) > 0) | |
237 | vringh_notify(vrh); | |
238 | vringh_kiov_cleanup(riov); | |
239 | vringh_kiov_cleanup(wiov); | |
240 | /* Update avail idx for user space */ | |
241 | vr->info->avail_idx = vrh->last_avail_idx; | |
242 | } | |
243 | return ret; | |
244 | } | |
245 | ||
246 | static inline int mic_verify_copy_args(struct mic_vdev *mvdev, | |
247 | struct mic_copy_desc *copy) | |
248 | { | |
249 | if (copy->vr_idx >= mvdev->dd->num_vq) { | |
250 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | |
251 | __func__, __LINE__, -EINVAL); | |
252 | return -EINVAL; | |
253 | } | |
254 | return 0; | |
255 | } | |
256 | ||
257 | /* Copy a specified number of virtio descriptors in a chain */ | |
258 | int mic_virtio_copy_desc(struct mic_vdev *mvdev, | |
259 | struct mic_copy_desc *copy) | |
260 | { | |
261 | int err; | |
262 | struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx]; | |
263 | ||
264 | err = mic_verify_copy_args(mvdev, copy); | |
265 | if (err) | |
266 | return err; | |
267 | ||
268 | mutex_lock(&mvr->vr_mutex); | |
269 | if (!mic_vdevup(mvdev)) { | |
270 | err = -ENODEV; | |
271 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | |
272 | __func__, __LINE__, err); | |
273 | goto err; | |
274 | } | |
275 | err = _mic_virtio_copy(mvdev, copy); | |
276 | if (err) { | |
277 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | |
278 | __func__, __LINE__, err); | |
279 | } | |
280 | err: | |
281 | mutex_unlock(&mvr->vr_mutex); | |
282 | return err; | |
283 | } | |
284 | ||
285 | static void mic_virtio_init_post(struct mic_vdev *mvdev) | |
286 | { | |
287 | struct mic_vqconfig *vqconfig = mic_vq_config(mvdev->dd); | |
288 | int i; | |
289 | ||
290 | for (i = 0; i < mvdev->dd->num_vq; i++) { | |
291 | if (!le64_to_cpu(vqconfig[i].used_address)) { | |
292 | dev_warn(mic_dev(mvdev), "used_address zero??\n"); | |
293 | continue; | |
294 | } | |
295 | mvdev->mvr[i].vrh.vring.used = | |
1a928623 AD |
296 | (void __force *)mvdev->mdev->aper.va + |
297 | le64_to_cpu(vqconfig[i].used_address); | |
f69bcbf3 AD |
298 | } |
299 | ||
300 | mvdev->dc->used_address_updated = 0; | |
301 | ||
302 | dev_dbg(mic_dev(mvdev), "%s: device type %d LINKUP\n", | |
303 | __func__, mvdev->virtio_id); | |
304 | } | |
305 | ||
306 | static inline void mic_virtio_device_reset(struct mic_vdev *mvdev) | |
307 | { | |
308 | int i; | |
309 | ||
310 | dev_dbg(mic_dev(mvdev), "%s: status %d device type %d RESET\n", | |
311 | __func__, mvdev->dd->status, mvdev->virtio_id); | |
312 | ||
313 | for (i = 0; i < mvdev->dd->num_vq; i++) | |
314 | /* | |
315 | * Avoid lockdep false positive. The + 1 is for the mic | |
316 | * mutex which is held in the reset devices code path. | |
317 | */ | |
318 | mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1); | |
319 | ||
320 | /* 0 status means "reset" */ | |
321 | mvdev->dd->status = 0; | |
322 | mvdev->dc->vdev_reset = 0; | |
323 | mvdev->dc->host_ack = 1; | |
324 | ||
325 | for (i = 0; i < mvdev->dd->num_vq; i++) { | |
326 | struct vringh *vrh = &mvdev->mvr[i].vrh; | |
327 | mvdev->mvr[i].vring.info->avail_idx = 0; | |
328 | vrh->completed = 0; | |
329 | vrh->last_avail_idx = 0; | |
330 | vrh->last_used_idx = 0; | |
331 | } | |
332 | ||
333 | for (i = 0; i < mvdev->dd->num_vq; i++) | |
334 | mutex_unlock(&mvdev->mvr[i].vr_mutex); | |
335 | } | |
336 | ||
337 | void mic_virtio_reset_devices(struct mic_device *mdev) | |
338 | { | |
339 | struct list_head *pos, *tmp; | |
340 | struct mic_vdev *mvdev; | |
341 | ||
342 | dev_dbg(mdev->sdev->parent, "%s\n", __func__); | |
343 | ||
344 | list_for_each_safe(pos, tmp, &mdev->vdev_list) { | |
345 | mvdev = list_entry(pos, struct mic_vdev, list); | |
346 | mic_virtio_device_reset(mvdev); | |
347 | mvdev->poll_wake = 1; | |
348 | wake_up(&mvdev->waitq); | |
349 | } | |
350 | } | |
351 | ||
352 | void mic_bh_handler(struct work_struct *work) | |
353 | { | |
354 | struct mic_vdev *mvdev = container_of(work, struct mic_vdev, | |
355 | virtio_bh_work); | |
356 | ||
357 | if (mvdev->dc->used_address_updated) | |
358 | mic_virtio_init_post(mvdev); | |
359 | ||
360 | if (mvdev->dc->vdev_reset) | |
361 | mic_virtio_device_reset(mvdev); | |
362 | ||
363 | mvdev->poll_wake = 1; | |
364 | wake_up(&mvdev->waitq); | |
365 | } | |
366 | ||
367 | static irqreturn_t mic_virtio_intr_handler(int irq, void *data) | |
368 | { | |
f69bcbf3 AD |
369 | struct mic_vdev *mvdev = data; |
370 | struct mic_device *mdev = mvdev->mdev; | |
371 | ||
372 | mdev->ops->ack_interrupt(mdev); | |
373 | schedule_work(&mvdev->virtio_bh_work); | |
374 | return IRQ_HANDLED; | |
375 | } | |
376 | ||
377 | int mic_virtio_config_change(struct mic_vdev *mvdev, | |
378 | void __user *argp) | |
379 | { | |
380 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake); | |
9420b348 | 381 | int ret = 0, retry, i; |
f69bcbf3 AD |
382 | struct mic_bootparam *bootparam = mvdev->mdev->dp; |
383 | s8 db = bootparam->h2c_config_db; | |
384 | ||
385 | mutex_lock(&mvdev->mdev->mic_mutex); | |
386 | for (i = 0; i < mvdev->dd->num_vq; i++) | |
387 | mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1); | |
388 | ||
389 | if (db == -1 || mvdev->dd->type == -1) { | |
390 | ret = -EIO; | |
391 | goto exit; | |
392 | } | |
393 | ||
394 | if (copy_from_user(mic_vq_configspace(mvdev->dd), | |
ced2c60f | 395 | argp, mvdev->dd->config_len)) { |
f69bcbf3 AD |
396 | dev_err(mic_dev(mvdev), "%s %d err %d\n", |
397 | __func__, __LINE__, -EFAULT); | |
398 | ret = -EFAULT; | |
399 | goto exit; | |
400 | } | |
401 | mvdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED; | |
402 | mvdev->mdev->ops->send_intr(mvdev->mdev, db); | |
403 | ||
9420b348 | 404 | for (retry = 100; retry--;) { |
f69bcbf3 AD |
405 | ret = wait_event_timeout(wake, |
406 | mvdev->dc->guest_ack, msecs_to_jiffies(100)); | |
407 | if (ret) | |
408 | break; | |
409 | } | |
410 | ||
411 | dev_dbg(mic_dev(mvdev), | |
412 | "%s %d retry: %d\n", __func__, __LINE__, retry); | |
413 | mvdev->dc->config_change = 0; | |
414 | mvdev->dc->guest_ack = 0; | |
415 | exit: | |
416 | for (i = 0; i < mvdev->dd->num_vq; i++) | |
417 | mutex_unlock(&mvdev->mvr[i].vr_mutex); | |
418 | mutex_unlock(&mvdev->mdev->mic_mutex); | |
419 | return ret; | |
420 | } | |
421 | ||
422 | static int mic_copy_dp_entry(struct mic_vdev *mvdev, | |
423 | void __user *argp, | |
424 | __u8 *type, | |
425 | struct mic_device_desc **devpage) | |
426 | { | |
427 | struct mic_device *mdev = mvdev->mdev; | |
428 | struct mic_device_desc dd, *dd_config, *devp; | |
429 | struct mic_vqconfig *vqconfig; | |
430 | int ret = 0, i; | |
431 | bool slot_found = false; | |
432 | ||
433 | if (copy_from_user(&dd, argp, sizeof(dd))) { | |
434 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | |
435 | __func__, __LINE__, -EFAULT); | |
436 | return -EFAULT; | |
437 | } | |
438 | ||
ced2c60f AD |
439 | if (mic_aligned_desc_size(&dd) > MIC_MAX_DESC_BLK_SIZE || |
440 | dd.num_vq > MIC_MAX_VRINGS) { | |
f69bcbf3 AD |
441 | dev_err(mic_dev(mvdev), "%s %d err %d\n", |
442 | __func__, __LINE__, -EINVAL); | |
443 | return -EINVAL; | |
444 | } | |
445 | ||
446 | dd_config = kmalloc(mic_desc_size(&dd), GFP_KERNEL); | |
447 | if (dd_config == NULL) { | |
448 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | |
449 | __func__, __LINE__, -ENOMEM); | |
450 | return -ENOMEM; | |
451 | } | |
452 | if (copy_from_user(dd_config, argp, mic_desc_size(&dd))) { | |
453 | ret = -EFAULT; | |
454 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | |
455 | __func__, __LINE__, ret); | |
456 | goto exit; | |
457 | } | |
458 | ||
459 | vqconfig = mic_vq_config(dd_config); | |
460 | for (i = 0; i < dd.num_vq; i++) { | |
461 | if (le16_to_cpu(vqconfig[i].num) > MIC_MAX_VRING_ENTRIES) { | |
462 | ret = -EINVAL; | |
463 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | |
464 | __func__, __LINE__, ret); | |
465 | goto exit; | |
466 | } | |
467 | } | |
468 | ||
469 | /* Find the first free device page entry */ | |
1e31aa92 | 470 | for (i = sizeof(struct mic_bootparam); |
f69bcbf3 AD |
471 | i < MIC_DP_SIZE - mic_total_desc_size(dd_config); |
472 | i += mic_total_desc_size(devp)) { | |
473 | devp = mdev->dp + i; | |
474 | if (devp->type == 0 || devp->type == -1) { | |
475 | slot_found = true; | |
476 | break; | |
477 | } | |
478 | } | |
479 | if (!slot_found) { | |
480 | ret = -EINVAL; | |
481 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | |
482 | __func__, __LINE__, ret); | |
483 | goto exit; | |
484 | } | |
485 | /* | |
486 | * Save off the type before doing the memcpy. Type will be set in the | |
487 | * end after completing all initialization for the new device. | |
488 | */ | |
489 | *type = dd_config->type; | |
490 | dd_config->type = 0; | |
491 | memcpy(devp, dd_config, mic_desc_size(dd_config)); | |
492 | ||
493 | *devpage = devp; | |
494 | exit: | |
495 | kfree(dd_config); | |
496 | return ret; | |
497 | } | |
498 | ||
499 | static void mic_init_device_ctrl(struct mic_vdev *mvdev, | |
500 | struct mic_device_desc *devpage) | |
501 | { | |
502 | struct mic_device_ctrl *dc; | |
503 | ||
ced2c60f | 504 | dc = (void *)devpage + mic_aligned_desc_size(devpage); |
f69bcbf3 AD |
505 | |
506 | dc->config_change = 0; | |
507 | dc->guest_ack = 0; | |
508 | dc->vdev_reset = 0; | |
509 | dc->host_ack = 0; | |
510 | dc->used_address_updated = 0; | |
511 | dc->c2h_vdev_db = -1; | |
512 | dc->h2c_vdev_db = -1; | |
ced2c60f | 513 | mvdev->dc = dc; |
f69bcbf3 AD |
514 | } |
515 | ||
516 | int mic_virtio_add_device(struct mic_vdev *mvdev, | |
517 | void __user *argp) | |
518 | { | |
519 | struct mic_device *mdev = mvdev->mdev; | |
42579226 | 520 | struct mic_device_desc *dd = NULL; |
f69bcbf3 AD |
521 | struct mic_vqconfig *vqconfig; |
522 | int vr_size, i, j, ret; | |
42579226 | 523 | u8 type = 0; |
f69bcbf3 AD |
524 | s8 db; |
525 | char irqname[10]; | |
526 | struct mic_bootparam *bootparam = mdev->dp; | |
527 | u16 num; | |
173c0727 | 528 | dma_addr_t vr_addr; |
f69bcbf3 AD |
529 | |
530 | mutex_lock(&mdev->mic_mutex); | |
531 | ||
532 | ret = mic_copy_dp_entry(mvdev, argp, &type, &dd); | |
533 | if (ret) { | |
534 | mutex_unlock(&mdev->mic_mutex); | |
535 | return ret; | |
536 | } | |
537 | ||
538 | mic_init_device_ctrl(mvdev, dd); | |
539 | ||
540 | mvdev->dd = dd; | |
541 | mvdev->virtio_id = type; | |
542 | vqconfig = mic_vq_config(dd); | |
543 | INIT_WORK(&mvdev->virtio_bh_work, mic_bh_handler); | |
544 | ||
545 | for (i = 0; i < dd->num_vq; i++) { | |
546 | struct mic_vringh *mvr = &mvdev->mvr[i]; | |
547 | struct mic_vring *vr = &mvdev->mvr[i].vring; | |
548 | num = le16_to_cpu(vqconfig[i].num); | |
549 | mutex_init(&mvr->vr_mutex); | |
550 | vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) + | |
551 | sizeof(struct _mic_vring_info)); | |
552 | vr->va = (void *) | |
553 | __get_free_pages(GFP_KERNEL | __GFP_ZERO, | |
ced2c60f | 554 | get_order(vr_size)); |
f69bcbf3 AD |
555 | if (!vr->va) { |
556 | ret = -ENOMEM; | |
557 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | |
558 | __func__, __LINE__, ret); | |
559 | goto err; | |
560 | } | |
561 | vr->len = vr_size; | |
562 | vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN); | |
173c0727 AD |
563 | vr->info->magic = cpu_to_le32(MIC_MAGIC + mvdev->virtio_id + i); |
564 | vr_addr = mic_map_single(mdev, vr->va, vr_size); | |
565 | if (mic_map_error(vr_addr)) { | |
ced2c60f | 566 | free_pages((unsigned long)vr->va, get_order(vr_size)); |
f69bcbf3 AD |
567 | ret = -ENOMEM; |
568 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | |
569 | __func__, __LINE__, ret); | |
570 | goto err; | |
571 | } | |
173c0727 | 572 | vqconfig[i].address = cpu_to_le64(vr_addr); |
f69bcbf3 | 573 | |
ced2c60f | 574 | vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN); |
f69bcbf3 AD |
575 | ret = vringh_init_kern(&mvr->vrh, |
576 | *(u32 *)mic_vq_features(mvdev->dd), num, false, | |
577 | vr->vr.desc, vr->vr.avail, vr->vr.used); | |
578 | if (ret) { | |
579 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | |
580 | __func__, __LINE__, ret); | |
581 | goto err; | |
582 | } | |
583 | vringh_kiov_init(&mvr->riov, NULL, 0); | |
584 | vringh_kiov_init(&mvr->wiov, NULL, 0); | |
585 | mvr->head = USHRT_MAX; | |
586 | mvr->mvdev = mvdev; | |
587 | mvr->vrh.notify = mic_notify; | |
588 | dev_dbg(mdev->sdev->parent, | |
589 | "%s %d index %d va %p info %p vr_size 0x%x\n", | |
590 | __func__, __LINE__, i, vr->va, vr->info, vr_size); | |
591 | } | |
592 | ||
ced2c60f AD |
593 | snprintf(irqname, sizeof(irqname), "mic%dvirtio%d", mdev->id, |
594 | mvdev->virtio_id); | |
f69bcbf3 AD |
595 | mvdev->virtio_db = mic_next_db(mdev); |
596 | mvdev->virtio_cookie = mic_request_irq(mdev, mic_virtio_intr_handler, | |
597 | irqname, mvdev, mvdev->virtio_db, MIC_INTR_DB); | |
598 | if (IS_ERR(mvdev->virtio_cookie)) { | |
599 | ret = PTR_ERR(mvdev->virtio_cookie); | |
600 | dev_dbg(mdev->sdev->parent, "request irq failed\n"); | |
601 | goto err; | |
602 | } | |
603 | ||
604 | mvdev->dc->c2h_vdev_db = mvdev->virtio_db; | |
605 | ||
606 | list_add_tail(&mvdev->list, &mdev->vdev_list); | |
607 | /* | |
608 | * Order the type update with previous stores. This write barrier | |
609 | * is paired with the corresponding read barrier before the uncached | |
610 | * system memory read of the type, on the card while scanning the | |
611 | * device page. | |
612 | */ | |
613 | smp_wmb(); | |
614 | dd->type = type; | |
615 | ||
616 | dev_dbg(mdev->sdev->parent, "Added virtio device id %d\n", dd->type); | |
617 | ||
618 | db = bootparam->h2c_config_db; | |
619 | if (db != -1) | |
620 | mdev->ops->send_intr(mdev, db); | |
621 | mutex_unlock(&mdev->mic_mutex); | |
622 | return 0; | |
623 | err: | |
624 | vqconfig = mic_vq_config(dd); | |
625 | for (j = 0; j < i; j++) { | |
626 | struct mic_vringh *mvr = &mvdev->mvr[j]; | |
627 | mic_unmap_single(mdev, le64_to_cpu(vqconfig[j].address), | |
ced2c60f | 628 | mvr->vring.len); |
f69bcbf3 | 629 | free_pages((unsigned long)mvr->vring.va, |
ced2c60f | 630 | get_order(mvr->vring.len)); |
f69bcbf3 AD |
631 | } |
632 | mutex_unlock(&mdev->mic_mutex); | |
633 | return ret; | |
634 | } | |
635 | ||
636 | void mic_virtio_del_device(struct mic_vdev *mvdev) | |
637 | { | |
638 | struct list_head *pos, *tmp; | |
639 | struct mic_vdev *tmp_mvdev; | |
640 | struct mic_device *mdev = mvdev->mdev; | |
641 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake); | |
9420b348 | 642 | int i, ret, retry; |
f69bcbf3 AD |
643 | struct mic_vqconfig *vqconfig; |
644 | struct mic_bootparam *bootparam = mdev->dp; | |
645 | s8 db; | |
646 | ||
647 | mutex_lock(&mdev->mic_mutex); | |
648 | db = bootparam->h2c_config_db; | |
649 | if (db == -1) | |
650 | goto skip_hot_remove; | |
651 | dev_dbg(mdev->sdev->parent, | |
652 | "Requesting hot remove id %d\n", mvdev->virtio_id); | |
653 | mvdev->dc->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE; | |
654 | mdev->ops->send_intr(mdev, db); | |
9420b348 | 655 | for (retry = 100; retry--;) { |
f69bcbf3 AD |
656 | ret = wait_event_timeout(wake, |
657 | mvdev->dc->guest_ack, msecs_to_jiffies(100)); | |
658 | if (ret) | |
659 | break; | |
660 | } | |
661 | dev_dbg(mdev->sdev->parent, | |
9420b348 | 662 | "Device id %d config_change %d guest_ack %d retry %d\n", |
f69bcbf3 | 663 | mvdev->virtio_id, mvdev->dc->config_change, |
9420b348 | 664 | mvdev->dc->guest_ack, retry); |
f69bcbf3 AD |
665 | mvdev->dc->config_change = 0; |
666 | mvdev->dc->guest_ack = 0; | |
667 | skip_hot_remove: | |
668 | mic_free_irq(mdev, mvdev->virtio_cookie, mvdev); | |
669 | flush_work(&mvdev->virtio_bh_work); | |
670 | vqconfig = mic_vq_config(mvdev->dd); | |
671 | for (i = 0; i < mvdev->dd->num_vq; i++) { | |
672 | struct mic_vringh *mvr = &mvdev->mvr[i]; | |
673 | vringh_kiov_cleanup(&mvr->riov); | |
674 | vringh_kiov_cleanup(&mvr->wiov); | |
675 | mic_unmap_single(mdev, le64_to_cpu(vqconfig[i].address), | |
ced2c60f | 676 | mvr->vring.len); |
f69bcbf3 | 677 | free_pages((unsigned long)mvr->vring.va, |
ced2c60f | 678 | get_order(mvr->vring.len)); |
f69bcbf3 AD |
679 | } |
680 | ||
681 | list_for_each_safe(pos, tmp, &mdev->vdev_list) { | |
682 | tmp_mvdev = list_entry(pos, struct mic_vdev, list); | |
683 | if (tmp_mvdev == mvdev) { | |
684 | list_del(pos); | |
685 | dev_dbg(mdev->sdev->parent, | |
686 | "Removing virtio device id %d\n", | |
687 | mvdev->virtio_id); | |
688 | break; | |
689 | } | |
690 | } | |
691 | /* | |
692 | * Order the type update with previous stores. This write barrier | |
693 | * is paired with the corresponding read barrier before the uncached | |
694 | * system memory read of the type, on the card while scanning the | |
695 | * device page. | |
696 | */ | |
697 | smp_wmb(); | |
698 | mvdev->dd->type = -1; | |
699 | mutex_unlock(&mdev->mic_mutex); | |
700 | } |