openwrt/staging/dedeckeh.git: target/linux/layerscape/patches-4.9/818-vfio-support-layerscape.patch
1 From 8d82d92ea697145c32bb36d9f39afd5bb0927bc2 Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Wed, 27 Sep 2017 10:34:46 +0800
4 Subject: [PATCH] vfio: support layerscape
5
6 This is an integrated patch for layerscape vfio support.
7
8 Signed-off-by: Bharat Bhushan <Bharat.Bhushan@nxp.com>
9 Signed-off-by: Eric Auger <eric.auger@redhat.com>
10 Signed-off-by: Robin Murphy <robin.murphy@arm.com>
11 Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
12 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
13 ---
14 drivers/vfio/Kconfig | 1 +
15 drivers/vfio/Makefile | 1 +
16 drivers/vfio/fsl-mc/Kconfig | 9 +
17 drivers/vfio/fsl-mc/Makefile | 2 +
18 drivers/vfio/fsl-mc/vfio_fsl_mc.c | 753 ++++++++++++++++++++++++++++++
19 drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c | 199 ++++++++
20 drivers/vfio/fsl-mc/vfio_fsl_mc_private.h | 55 +++
21 drivers/vfio/vfio_iommu_type1.c | 39 +-
22 include/uapi/linux/vfio.h | 1 +
23 9 files changed, 1058 insertions(+), 2 deletions(-)
24 create mode 100644 drivers/vfio/fsl-mc/Kconfig
25 create mode 100644 drivers/vfio/fsl-mc/Makefile
26 create mode 100644 drivers/vfio/fsl-mc/vfio_fsl_mc.c
27 create mode 100644 drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
28 create mode 100644 drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
29
30 diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig
31 index da6e2ce7..8a8a33e0 100644
32 --- a/drivers/vfio/Kconfig
33 +++ b/drivers/vfio/Kconfig
34 @@ -48,4 +48,5 @@ menuconfig VFIO_NOIOMMU
35
36 source "drivers/vfio/pci/Kconfig"
37 source "drivers/vfio/platform/Kconfig"
38 +source "drivers/vfio/fsl-mc/Kconfig"
39 source "virt/lib/Kconfig"
40 diff --git a/drivers/vfio/Makefile b/drivers/vfio/Makefile
41 index 7b8a31f6..560f0c67 100644
42 --- a/drivers/vfio/Makefile
43 +++ b/drivers/vfio/Makefile
44 @@ -7,3 +7,4 @@ obj-$(CONFIG_VFIO_IOMMU_SPAPR_TCE) += vfio_iommu_spapr_tce.o
45 obj-$(CONFIG_VFIO_SPAPR_EEH) += vfio_spapr_eeh.o
46 obj-$(CONFIG_VFIO_PCI) += pci/
47 obj-$(CONFIG_VFIO_PLATFORM) += platform/
48 +obj-$(CONFIG_VFIO_FSL_MC) += fsl-mc/
49 diff --git a/drivers/vfio/fsl-mc/Kconfig b/drivers/vfio/fsl-mc/Kconfig
50 new file mode 100644
51 index 00000000..b1a527d6
52 --- /dev/null
53 +++ b/drivers/vfio/fsl-mc/Kconfig
54 @@ -0,0 +1,9 @@
55 +config VFIO_FSL_MC
56 + tristate "VFIO support for QorIQ DPAA2 fsl-mc bus devices"
57 + depends on VFIO && FSL_MC_BUS && EVENTFD
58 + help
59 + Driver to enable support for the VFIO QorIQ DPAA2 fsl-mc
60 + (Management Complex) devices. This is required to pass through
61 + fsl-mc bus devices using the VFIO framework.
62 +
63 + If you don't know what to do here, say N.
64 diff --git a/drivers/vfio/fsl-mc/Makefile b/drivers/vfio/fsl-mc/Makefile
65 new file mode 100644
66 index 00000000..2aca75af
67 --- /dev/null
68 +++ b/drivers/vfio/fsl-mc/Makefile
69 @@ -0,0 +1,2 @@
70 +vfio-fsl_mc-y := vfio_fsl_mc.o
71 +obj-$(CONFIG_VFIO_FSL_MC) += vfio_fsl_mc.o vfio_fsl_mc_intr.o
72 diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc.c b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
73 new file mode 100644
74 index 00000000..9dc32d27
75 --- /dev/null
76 +++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
77 @@ -0,0 +1,753 @@
78 +/*
79 + * Freescale Management Complex (MC) device passthrough using VFIO
80 + *
81 + * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
82 + * Copyright 2016-2017 NXP
83 + * Author: Bharat Bhushan <bharat.bhushan@nxp.com>
84 + *
85 + * This file is licensed under the terms of the GNU General Public
86 + * License version 2. This program is licensed "as is" without any
87 + * warranty of any kind, whether express or implied.
88 + */
89 +
90 +#include <linux/device.h>
91 +#include <linux/iommu.h>
92 +#include <linux/module.h>
93 +#include <linux/mutex.h>
94 +#include <linux/slab.h>
95 +#include <linux/types.h>
96 +#include <linux/vfio.h>
97 +#include <linux/delay.h>
98 +
99 +#include "../../staging/fsl-mc/include/mc.h"
100 +#include "../../staging/fsl-mc/include/mc-bus.h"
101 +#include "../../staging/fsl-mc/include/mc-sys.h"
102 +#include "../../staging/fsl-mc/bus/dprc-cmd.h"
103 +
104 +#include "vfio_fsl_mc_private.h"
105 +
106 +#define DRIVER_VERSION "0.10"
107 +#define DRIVER_AUTHOR "Bharat Bhushan <bharat.bhushan@nxp.com>"
108 +#define DRIVER_DESC "VFIO for FSL-MC devices - User Level meta-driver"
109 +
110 +static DEFINE_MUTEX(driver_lock);
111 +
112 +/* FSL-MC device regions (address and size) are 64K aligned, but the MC
113 + * firmware reports a size smaller than 64K for some objects (the reported
114 + * size does not include the reserved space beyond the valid bytes).
115 + * Align the size to PAGE_SIZE so userspace can mmap it.
116 + */
117 +static size_t aligned_region_size(struct fsl_mc_device *mc_dev, int index)
118 +{
119 + size_t size;
120 +
121 + size = resource_size(&mc_dev->regions[index]);
122 + return PAGE_ALIGN(size);
123 +}
124 +
125 +static int vfio_fsl_mc_regions_init(struct vfio_fsl_mc_device *vdev)
126 +{
127 + struct fsl_mc_device *mc_dev = vdev->mc_dev;
128 + int count = mc_dev->obj_desc.region_count;
129 + int i;
130 +
131 + vdev->regions = kcalloc(count, sizeof(struct vfio_fsl_mc_region),
132 + GFP_KERNEL);
133 + if (!vdev->regions)
134 + return -ENOMEM;
135 +
136 + for (i = 0; i < mc_dev->obj_desc.region_count; i++) {
137 + vdev->regions[i].addr = mc_dev->regions[i].start;
138 + vdev->regions[i].size = aligned_region_size(mc_dev, i);
139 + vdev->regions[i].type = VFIO_FSL_MC_REGION_TYPE_MMIO;
140 + if (mc_dev->regions[i].flags & IORESOURCE_CACHEABLE)
141 + vdev->regions[i].type |=
142 + VFIO_FSL_MC_REGION_TYPE_CACHEABLE;
143 + vdev->regions[i].flags = VFIO_REGION_INFO_FLAG_MMAP;
144 + vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
145 + if (!(mc_dev->regions[i].flags & IORESOURCE_READONLY))
146 + vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_WRITE;
147 + }
148 +
149 + vdev->num_regions = mc_dev->obj_desc.region_count;
150 + return 0;
151 +}
152 +
153 +static void vfio_fsl_mc_regions_cleanup(struct vfio_fsl_mc_device *vdev)
154 +{
155 + int i;
156 +
157 + for (i = 0; i < vdev->num_regions; i++)
158 + iounmap(vdev->regions[i].ioaddr);
159 +
160 + vdev->num_regions = 0;
161 + kfree(vdev->regions);
162 +}
163 +
164 +static int vfio_fsl_mc_open(void *device_data)
165 +{
166 + struct vfio_fsl_mc_device *vdev = device_data;
167 + int ret;
168 +
169 + if (!try_module_get(THIS_MODULE))
170 + return -ENODEV;
171 +
172 + mutex_lock(&driver_lock);
173 + if (!vdev->refcnt) {
174 + ret = vfio_fsl_mc_regions_init(vdev);
175 + if (ret)
176 + goto error_region_init;
177 +
178 + ret = vfio_fsl_mc_irqs_init(vdev);
179 + if (ret)
180 + goto error_irq_init;
181 + }
182 +
183 + vdev->refcnt++;
184 + mutex_unlock(&driver_lock);
185 + return 0;
186 +
187 +error_irq_init:
188 + vfio_fsl_mc_regions_cleanup(vdev);
189 +error_region_init:
190 + mutex_unlock(&driver_lock);
191 + if (ret)
192 + module_put(THIS_MODULE);
193 +
194 + return ret;
195 +}
196 +
197 +static void vfio_fsl_mc_release(void *device_data)
198 +{
199 + struct vfio_fsl_mc_device *vdev = device_data;
200 + struct fsl_mc_device *mc_dev = vdev->mc_dev;
201 +
202 + mutex_lock(&driver_lock);
203 +
204 + if (!(--vdev->refcnt)) {
205 + vfio_fsl_mc_regions_cleanup(vdev);
206 + vfio_fsl_mc_irqs_cleanup(vdev);
207 + }
208 +
209 + if (strcmp(mc_dev->obj_desc.type, "dprc") == 0)
210 + dprc_reset_container(mc_dev->mc_io, 0, mc_dev->mc_handle,
211 + mc_dev->obj_desc.id);
212 +
213 + mutex_unlock(&driver_lock);
214 +
215 + module_put(THIS_MODULE);
216 +}
217 +
218 +static long vfio_fsl_mc_ioctl(void *device_data, unsigned int cmd,
219 + unsigned long arg)
220 +{
221 + struct vfio_fsl_mc_device *vdev = device_data;
222 + struct fsl_mc_device *mc_dev = vdev->mc_dev;
223 + unsigned long minsz;
224 +
225 + if (WARN_ON(!mc_dev))
226 + return -ENODEV;
227 +
228 + switch (cmd) {
229 + case VFIO_DEVICE_GET_INFO:
230 + {
231 + struct vfio_device_info info;
232 +
233 + minsz = offsetofend(struct vfio_device_info, num_irqs);
234 +
235 + if (copy_from_user(&info, (void __user *)arg, minsz))
236 + return -EFAULT;
237 +
238 + if (info.argsz < minsz)
239 + return -EINVAL;
240 +
241 + info.flags = VFIO_DEVICE_FLAGS_FSL_MC;
242 + info.num_regions = mc_dev->obj_desc.region_count;
243 + info.num_irqs = mc_dev->obj_desc.irq_count;
244 +
245 + return copy_to_user((void __user *)arg, &info, minsz);
246 + }
247 + case VFIO_DEVICE_GET_REGION_INFO:
248 + {
249 + struct vfio_region_info info;
250 +
251 + minsz = offsetofend(struct vfio_region_info, offset);
252 +
253 + if (copy_from_user(&info, (void __user *)arg, minsz))
254 + return -EFAULT;
255 +
256 + if (info.argsz < minsz)
257 + return -EINVAL;
258 +
259 + if (info.index >= vdev->num_regions)
260 + return -EINVAL;
261 +
262 + /* map offset to the physical address */
263 + info.offset = VFIO_FSL_MC_INDEX_TO_OFFSET(info.index);
264 + info.size = vdev->regions[info.index].size;
265 + info.flags = vdev->regions[info.index].flags;
266 +
267 + return copy_to_user((void __user *)arg, &info, minsz);
268 + }
269 + case VFIO_DEVICE_GET_IRQ_INFO:
270 + {
271 + struct vfio_irq_info info;
272 +
273 + minsz = offsetofend(struct vfio_irq_info, count);
274 + if (copy_from_user(&info, (void __user *)arg, minsz))
275 + return -EFAULT;
276 +
277 + if (info.argsz < minsz)
278 + return -EINVAL;
279 +
280 + if (info.index >= mc_dev->obj_desc.irq_count)
281 + return -EINVAL;
282 +
283 + if (vdev->mc_irqs != NULL) {
284 + info.flags = vdev->mc_irqs[info.index].flags;
285 + info.count = vdev->mc_irqs[info.index].count;
286 + } else {
287 + /*
288 + * If IRQs are not initialized then they cannot be
289 + * configured or used by user-space.
290 + */
291 + info.flags = 0;
292 + info.count = 0;
293 + }
294 +
295 + return copy_to_user((void __user *)arg, &info, minsz);
296 + }
297 + case VFIO_DEVICE_SET_IRQS:
298 + {
299 + struct vfio_irq_set hdr;
300 + u8 *data = NULL;
301 + int ret = 0;
302 +
303 + minsz = offsetofend(struct vfio_irq_set, count);
304 +
305 + if (copy_from_user(&hdr, (void __user *)arg, minsz))
306 + return -EFAULT;
307 +
308 + if (hdr.argsz < minsz)
309 + return -EINVAL;
310 +
311 + if (hdr.index >= mc_dev->obj_desc.irq_count)
312 + return -EINVAL;
313 +
314 + if (hdr.start != 0 || hdr.count > 1)
315 + return -EINVAL;
316 +
317 + if (hdr.count == 0 &&
318 + (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE) ||
319 + !(hdr.flags & VFIO_IRQ_SET_ACTION_TRIGGER)))
320 + return -EINVAL;
321 +
322 + if (hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
323 + VFIO_IRQ_SET_ACTION_TYPE_MASK))
324 + return -EINVAL;
325 +
326 + if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
327 + size_t size;
328 +
329 + if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
330 + size = sizeof(uint8_t);
331 + else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
332 + size = sizeof(int32_t);
333 + else
334 + return -EINVAL;
335 +
336 + if (hdr.argsz - minsz < hdr.count * size)
337 + return -EINVAL;
338 +
339 + data = memdup_user((void __user *)(arg + minsz),
340 + hdr.count * size);
341 + if (IS_ERR(data))
342 + return PTR_ERR(data);
343 + }
344 +
345 + ret = vfio_fsl_mc_set_irqs_ioctl(vdev, hdr.flags,
346 + hdr.index, hdr.start,
347 + hdr.count, data);
348 + return ret;
349 + }
350 + case VFIO_DEVICE_RESET:
351 + {
352 + return -EINVAL;
353 + }
354 + default:
355 + return -EINVAL;
356 + }
357 +}
358 +
359 +static ssize_t vfio_fsl_mc_read(void *device_data, char __user *buf,
360 + size_t count, loff_t *ppos)
361 +{
362 + struct vfio_fsl_mc_device *vdev = device_data;
363 + unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
364 + loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
365 + struct vfio_fsl_mc_region *region;
366 + uint64_t data[8];
367 + int i;
368 +
369 + /* Reads are supported only for the DPRC device */
370 + if (strcmp(vdev->mc_dev->obj_desc.type, "dprc"))
371 + return -EINVAL;
372 +
373 + if (index >= vdev->num_regions)
374 + return -EINVAL;
375 +
376 + region = &vdev->regions[index];
377 +
378 + if (!(region->flags & VFIO_REGION_INFO_FLAG_READ))
379 + return -EINVAL;
380 +
381 + if (!(region->type & VFIO_FSL_MC_REGION_TYPE_MMIO))
382 + return -EINVAL;
383 +
384 + if (!region->ioaddr) {
385 + region->ioaddr = ioremap_nocache(region->addr, region->size);
386 + if (!region->ioaddr)
387 + return -ENOMEM;
388 + }
389 +
390 + if (count != 64 || off != 0)
391 + return -EINVAL;
392 +
393 + for (i = 7; i >= 0; i--)
394 + data[i] = readq(region->ioaddr + i * sizeof(uint64_t));
395 +
396 + if (copy_to_user(buf, data, 64))
397 + return -EFAULT;
398 +
399 + return count;
400 +}
401 +
402 +#define MC_CMD_COMPLETION_TIMEOUT_MS 5000
403 +#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS 500
404 +
405 +static int vfio_fsl_mc_dprc_wait_for_response(void __iomem *ioaddr)
406 +{
407 + enum mc_cmd_status status;
408 + unsigned long timeout_usecs = MC_CMD_COMPLETION_TIMEOUT_MS * 1000;
409 +
410 + for (;;) {
411 + u64 header;
412 + struct mc_cmd_header *resp_hdr;
413 +
414 + __iormb();
415 + header = readq(ioaddr);
416 + __iormb();
417 +
418 + resp_hdr = (struct mc_cmd_header *)&header;
419 + status = (enum mc_cmd_status)resp_hdr->status;
420 + if (status != MC_CMD_STATUS_READY)
421 + break;
422 +
423 + udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
424 + timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
425 + if (timeout_usecs == 0)
426 + return -ETIMEDOUT;
427 + }
428 +
429 + return 0;
430 +}
431 +
432 +static int vfio_fsl_mc_send_command(void __iomem *ioaddr, uint64_t *cmd_data)
433 +{
434 + int i;
435 +
436 + /* Write the command words, header (word 0) last */
437 + for (i = 7; i >= 0; i--)
438 + writeq(cmd_data[i], ioaddr + i * sizeof(uint64_t));
439 +
440 + /* Wait for the response before returning to user-space.
441 + * This could be optimized in the future to prepare the response
442 + * before returning to user-space and avoid the read ioctl.
443 + */
444 + return vfio_fsl_mc_dprc_wait_for_response(ioaddr);
445 +}
446 +
447 +static int vfio_handle_dprc_commands(void __iomem *ioaddr, uint64_t *cmd_data)
448 +{
449 + uint64_t cmd_hdr = cmd_data[0];
450 + int cmd = (cmd_hdr >> 52) & 0xfff;
451 +
452 + switch (cmd) {
453 + case DPRC_CMDID_OPEN:
454 + default:
455 + return vfio_fsl_mc_send_command(ioaddr, cmd_data);
456 + }
457 +
458 + return 0;
459 +}
460 +
461 +static ssize_t vfio_fsl_mc_write(void *device_data, const char __user *buf,
462 + size_t count, loff_t *ppos)
463 +{
464 + struct vfio_fsl_mc_device *vdev = device_data;
465 + unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
466 + loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
467 + struct vfio_fsl_mc_region *region;
468 + uint64_t data[8];
469 + int ret;
470 +
471 + /* Writes are supported only for the DPRC device */
472 + if (strcmp(vdev->mc_dev->obj_desc.type, "dprc"))
473 + return -EINVAL;
474 +
475 + if (index >= vdev->num_regions)
476 + return -EINVAL;
477 +
478 + region = &vdev->regions[index];
479 +
480 + if (!(region->flags & VFIO_REGION_INFO_FLAG_WRITE))
481 + return -EINVAL;
482 +
483 + if (!(region->type & VFIO_FSL_MC_REGION_TYPE_MMIO))
484 + return -EINVAL;
485 +
486 + if (!region->ioaddr) {
487 + region->ioaddr = ioremap_nocache(region->addr, region->size);
488 + if (!region->ioaddr)
489 + return -ENOMEM;
490 + }
491 +
492 + if (count != 64 || off != 0)
493 + return -EINVAL;
494 +
495 + if (copy_from_user(&data, buf, 64))
496 + return -EFAULT;
497 +
498 + ret = vfio_handle_dprc_commands(region->ioaddr, data);
499 + if (ret)
500 + return ret;
501 +
502 + return count;
503 +}
504 +
505 +static int vfio_fsl_mc_mmap_mmio(struct vfio_fsl_mc_region region,
506 + struct vm_area_struct *vma)
507 +{
508 + u64 size = vma->vm_end - vma->vm_start;
509 + u64 pgoff, base;
510 +
511 + pgoff = vma->vm_pgoff &
512 + ((1U << (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
513 + base = pgoff << PAGE_SHIFT;
514 +
515 + if (region.size < PAGE_SIZE || base + size > region.size)
516 + return -EINVAL;
517 + /*
518 + * Set the REGION_TYPE_CACHEABLE (QBman CENA regs) to be the
519 + * cache inhibited area of the portal to avoid coherency issues
520 + * if a user migrates to another core.
521 + */
522 + if (region.type & VFIO_FSL_MC_REGION_TYPE_CACHEABLE)
523 + vma->vm_page_prot = pgprot_cached_ns(vma->vm_page_prot);
524 + else
525 + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
526 +
527 + vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;
528 +
529 + return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
530 + size, vma->vm_page_prot);
531 +}
532 +
533 +/* Allows mmapping fsl_mc device regions in the assigned DPRC */
534 +static int vfio_fsl_mc_mmap(void *device_data, struct vm_area_struct *vma)
535 +{
536 + struct vfio_fsl_mc_device *vdev = device_data;
537 + struct fsl_mc_device *mc_dev = vdev->mc_dev;
538 + unsigned long size, addr;
539 + int index;
540 +
541 + index = vma->vm_pgoff >> (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT);
542 +
543 + if (vma->vm_end < vma->vm_start)
544 + return -EINVAL;
545 + if (vma->vm_start & ~PAGE_MASK)
546 + return -EINVAL;
547 + if (vma->vm_end & ~PAGE_MASK)
548 + return -EINVAL;
549 + if (!(vma->vm_flags & VM_SHARED))
550 + return -EINVAL;
551 + if (index >= vdev->num_regions)
552 + return -EINVAL;
553 +
554 + if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
555 + return -EINVAL;
556 +
557 + if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
558 + && (vma->vm_flags & VM_READ))
559 + return -EINVAL;
560 +
561 + if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
562 + && (vma->vm_flags & VM_WRITE))
563 + return -EINVAL;
564 +
565 + addr = vdev->regions[index].addr;
566 + size = vdev->regions[index].size;
567 +
568 + vma->vm_private_data = mc_dev;
569 +
570 + if (vdev->regions[index].type & VFIO_FSL_MC_REGION_TYPE_MMIO)
571 + return vfio_fsl_mc_mmap_mmio(vdev->regions[index], vma);
572 +
573 + return -EFAULT;
574 +}
575 +
576 +static const struct vfio_device_ops vfio_fsl_mc_ops = {
577 + .name = "vfio-fsl-mc",
578 + .open = vfio_fsl_mc_open,
579 + .release = vfio_fsl_mc_release,
580 + .ioctl = vfio_fsl_mc_ioctl,
581 + .read = vfio_fsl_mc_read,
582 + .write = vfio_fsl_mc_write,
583 + .mmap = vfio_fsl_mc_mmap,
584 +};
585 +
586 +static int vfio_fsl_mc_initialize_dprc(struct vfio_fsl_mc_device *vdev)
587 +{
588 + struct device *root_dprc_dev;
589 + struct fsl_mc_device *mc_dev = vdev->mc_dev;
590 + struct device *dev = &mc_dev->dev;
591 + struct fsl_mc_bus *mc_bus;
592 + struct irq_domain *mc_msi_domain;
593 + unsigned int irq_count;
594 + int ret;
595 +
596 + /* device must be DPRC */
597 + if (strcmp(mc_dev->obj_desc.type, "dprc"))
598 + return -EINVAL;
599 +
600 + /* mc_io must be un-initialized */
601 + WARN_ON(mc_dev->mc_io);
602 +
603 + /* allocate a portal from the root DPRC for vfio use */
604 + fsl_mc_get_root_dprc(dev, &root_dprc_dev);
605 + if (WARN_ON(!root_dprc_dev))
606 + return -EINVAL;
607 +
608 + ret = fsl_mc_portal_allocate(to_fsl_mc_device(root_dprc_dev),
609 + FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
610 + &mc_dev->mc_io);
611 + if (ret < 0)
612 + goto clean_msi_domain;
613 +
614 + /* Reset the MC portal before moving on */
615 + ret = fsl_mc_portal_reset(mc_dev->mc_io);
616 + if (ret < 0) {
617 + dev_err(dev, "dprc portal reset failed: error = %d\n", ret);
618 + goto free_mc_portal;
619 + }
620 +
621 + /* MSI domain set up */
622 + ret = fsl_mc_find_msi_domain(root_dprc_dev->parent, &mc_msi_domain);
623 + if (ret < 0)
624 + goto free_mc_portal;
625 +
626 + dev_set_msi_domain(&mc_dev->dev, mc_msi_domain);
627 +
628 + ret = dprc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
629 + &mc_dev->mc_handle);
630 + if (ret) {
631 + dev_err(dev, "dprc_open() failed: error = %d\n", ret);
632 + goto free_mc_portal;
633 + }
634 +
635 + /* Initialize resource pool */
636 + fsl_mc_init_all_resource_pools(mc_dev);
637 +
638 + mc_bus = to_fsl_mc_bus(mc_dev);
639 +
640 + if (!mc_bus->irq_resources) {
641 + irq_count = FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS;
642 + ret = fsl_mc_populate_irq_pool(mc_bus, irq_count);
643 + if (ret < 0) {
644 + dev_err(dev, "%s: Failed to init irq-pool\n", __func__);
645 + goto clean_resource_pool;
646 + }
647 + }
648 +
649 + mutex_init(&mc_bus->scan_mutex);
650 +
651 + mutex_lock(&mc_bus->scan_mutex);
652 + ret = dprc_scan_objects(mc_dev, mc_dev->driver_override,
653 + &irq_count);
654 + mutex_unlock(&mc_bus->scan_mutex);
655 + if (ret) {
656 + dev_err(dev, "dprc_scan_objects() fails (%d)\n", ret);
657 + goto clean_irq_pool;
658 + }
659 +
660 + if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) {
661 + dev_warn(&mc_dev->dev,
662 + "IRQs needed (%u) exceed IRQs preallocated (%u)\n",
663 + irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
664 + }
665 +
666 + return 0;
667 +
668 +clean_irq_pool:
669 + fsl_mc_cleanup_irq_pool(mc_bus);
670 +
671 +clean_resource_pool:
672 + fsl_mc_cleanup_all_resource_pools(mc_dev);
673 + dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
674 +
675 +free_mc_portal:
676 + fsl_mc_portal_free(mc_dev->mc_io);
677 +
678 +clean_msi_domain:
679 + dev_set_msi_domain(&mc_dev->dev, NULL);
680 +
681 + return ret;
682 +}
683 +
684 +static int vfio_fsl_mc_device_remove(struct device *dev, void *data)
685 +{
686 + struct fsl_mc_device *mc_dev;
687 +
688 + WARN_ON(dev == NULL);
689 +
690 + mc_dev = to_fsl_mc_device(dev);
691 + if (WARN_ON(mc_dev == NULL))
692 + return -ENODEV;
693 +
694 + fsl_mc_device_remove(mc_dev);
695 + return 0;
696 +}
697 +
698 +static void vfio_fsl_mc_cleanup_dprc(struct vfio_fsl_mc_device *vdev)
699 +{
700 + struct fsl_mc_device *mc_dev = vdev->mc_dev;
701 + struct fsl_mc_bus *mc_bus;
702 +
703 + /* device must be DPRC */
704 + if (strcmp(mc_dev->obj_desc.type, "dprc"))
705 + return;
706 +
707 + device_for_each_child(&mc_dev->dev, NULL, vfio_fsl_mc_device_remove);
708 +
709 + mc_bus = to_fsl_mc_bus(mc_dev);
710 + if (dev_get_msi_domain(&mc_dev->dev))
711 + fsl_mc_cleanup_irq_pool(mc_bus);
712 +
713 + dev_set_msi_domain(&mc_dev->dev, NULL);
714 +
715 + fsl_mc_cleanup_all_resource_pools(mc_dev);
716 + dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
717 + fsl_mc_portal_free(mc_dev->mc_io);
718 +}
719 +
720 +static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
721 +{
722 + struct iommu_group *group;
723 + struct vfio_fsl_mc_device *vdev;
724 + struct device *dev = &mc_dev->dev;
725 + int ret;
726 +
727 + group = vfio_iommu_group_get(dev);
728 + if (!group) {
729 + dev_err(dev, "%s: VFIO: No IOMMU group\n", __func__);
730 + return -EINVAL;
731 + }
732 +
733 + vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
734 + if (!vdev) {
735 + vfio_iommu_group_put(group, dev);
736 + return -ENOMEM;
737 + }
738 +
739 + vdev->mc_dev = mc_dev;
740 +
741 + ret = vfio_add_group_dev(dev, &vfio_fsl_mc_ops, vdev);
742 + if (ret) {
743 + dev_err(dev, "%s: Failed to add to vfio group\n", __func__);
744 + goto free_vfio_device;
745 + }
746 +
747 + /* The DPRC container is scanned and its children bound to the vfio driver */
748 + if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) {
749 + ret = vfio_fsl_mc_initialize_dprc(vdev);
750 + if (ret) {
751 + vfio_del_group_dev(dev);
752 + goto free_vfio_device;
753 + }
754 + } else {
755 + struct fsl_mc_device *mc_bus_dev;
756 +
757 + /* Non-dprc devices share mc_io from the parent dprc */
758 + mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
759 + if (mc_bus_dev == NULL) {
760 + vfio_del_group_dev(dev);
761 + goto free_vfio_device;
762 + }
763 +
764 + mc_dev->mc_io = mc_bus_dev->mc_io;
765 +
766 + /* Inherit parent MSI domain */
767 + dev_set_msi_domain(&mc_dev->dev,
768 + dev_get_msi_domain(mc_dev->dev.parent));
769 + }
770 + return 0;
771 +
772 +free_vfio_device:
773 + kfree(vdev);
774 + vfio_iommu_group_put(group, dev);
775 + return ret;
776 +}
777 +
778 +static int vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
779 +{
780 + struct vfio_fsl_mc_device *vdev;
781 + struct device *dev = &mc_dev->dev;
782 +
783 + vdev = vfio_del_group_dev(dev);
784 + if (!vdev)
785 + return -EINVAL;
786 +
787 + if (strcmp(mc_dev->obj_desc.type, "dprc") == 0)
788 + vfio_fsl_mc_cleanup_dprc(vdev);
789 + else
790 + dev_set_msi_domain(&mc_dev->dev, NULL);
791 +
792 + mc_dev->mc_io = NULL;
793 +
794 + vfio_iommu_group_put(mc_dev->dev.iommu_group, dev);
795 + kfree(vdev);
796 +
797 + return 0;
798 +}
799 +
800 +/*
801 + * vfio-fsl-mc is a meta-driver: match_id_table is NULL, so the
802 + * driver_override interface is used to bind fsl-mc devices to this driver.
803 + */
804 +static struct fsl_mc_driver vfio_fsl_mc_driver = {
805 + .probe = vfio_fsl_mc_probe,
806 + .remove = vfio_fsl_mc_remove,
807 + .match_id_table = NULL,
808 + .driver = {
809 + .name = "vfio-fsl-mc",
810 + .owner = THIS_MODULE,
811 + },
812 +};
813 +
814 +static int __init vfio_fsl_mc_driver_init(void)
815 +{
816 + return fsl_mc_driver_register(&vfio_fsl_mc_driver);
817 +}
818 +
819 +static void __exit vfio_fsl_mc_driver_exit(void)
820 +{
821 + fsl_mc_driver_unregister(&vfio_fsl_mc_driver);
822 +}
823 +
824 +module_init(vfio_fsl_mc_driver_init);
825 +module_exit(vfio_fsl_mc_driver_exit);
826 +
827 +MODULE_VERSION(DRIVER_VERSION);
828 +MODULE_LICENSE("GPL v2");
829 +MODULE_AUTHOR(DRIVER_AUTHOR);
830 +MODULE_DESCRIPTION(DRIVER_DESC);
831 diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
832 new file mode 100644
833 index 00000000..eb244bb0
834 --- /dev/null
835 +++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
836 @@ -0,0 +1,199 @@
837 +/*
838 + * Freescale Management Complex (MC) device passthrough using VFIO
839 + *
840 + * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
841 + * Author: Bharat Bhushan <bharat.bhushan@nxp.com>
842 + *
843 + * This file is licensed under the terms of the GNU General Public
844 + * License version 2. This program is licensed "as is" without any
845 + * warranty of any kind, whether express or implied.
846 + */
847 +
848 +#include <linux/vfio.h>
849 +#include <linux/slab.h>
850 +#include <linux/types.h>
851 +#include <linux/eventfd.h>
852 +#include <linux/msi.h>
853 +
854 +#include "../../staging/fsl-mc/include/mc.h"
855 +#include "vfio_fsl_mc_private.h"
856 +
857 +static irqreturn_t vfio_fsl_mc_irq_handler(int irq_num, void *arg)
858 +{
859 + struct vfio_fsl_mc_irq *mc_irq = (struct vfio_fsl_mc_irq *)arg;
860 +
861 + eventfd_signal(mc_irq->trigger, 1);
862 + return IRQ_HANDLED;
863 +}
864 +
865 +static int vfio_fsl_mc_irq_mask(struct vfio_fsl_mc_device *vdev,
866 + unsigned int index, unsigned int start,
867 + unsigned int count, uint32_t flags,
868 + void *data)
869 +{
870 + return -EINVAL;
871 +}
872 +
873 +static int vfio_fsl_mc_irq_unmask(struct vfio_fsl_mc_device *vdev,
874 + unsigned int index, unsigned int start,
875 + unsigned int count, uint32_t flags,
876 + void *data)
877 +{
878 + return -EINVAL;
879 +}
880 +
881 +static int vfio_set_trigger(struct vfio_fsl_mc_device *vdev,
882 + int index, int fd)
883 +{
884 + struct vfio_fsl_mc_irq *irq = &vdev->mc_irqs[index];
885 + struct eventfd_ctx *trigger;
886 + int hwirq;
887 + int ret;
888 +
889 + hwirq = vdev->mc_dev->irqs[index]->msi_desc->irq;
890 + if (irq->trigger) {
891 + free_irq(hwirq, irq);
892 + kfree(irq->name);
893 + eventfd_ctx_put(irq->trigger);
894 + irq->trigger = NULL;
895 + }
896 +
897 + if (fd < 0) /* Disable only */
898 + return 0;
899 +
900 + irq->name = kasprintf(GFP_KERNEL, "vfio-irq[%d](%s)",
901 + hwirq, dev_name(&vdev->mc_dev->dev));
902 + if (!irq->name)
903 + return -ENOMEM;
904 +
905 + trigger = eventfd_ctx_fdget(fd);
906 + if (IS_ERR(trigger)) {
907 + kfree(irq->name);
908 + return PTR_ERR(trigger);
909 + }
910 +
911 + irq->trigger = trigger;
912 +
913 + ret = request_irq(hwirq, vfio_fsl_mc_irq_handler, 0,
914 + irq->name, irq);
915 + if (ret) {
916 + kfree(irq->name);
917 + eventfd_ctx_put(trigger);
918 + irq->trigger = NULL;
919 + return ret;
920 + }
921 +
922 + return 0;
923 +}
924 +
925 +int vfio_fsl_mc_irqs_init(struct vfio_fsl_mc_device *vdev)
926 +{
927 + struct fsl_mc_device *mc_dev = vdev->mc_dev;
928 + struct vfio_fsl_mc_irq *mc_irq;
929 + int irq_count;
930 + int ret, i;
931 +
932 + /* Device does not support any interrupt */
933 + if (mc_dev->obj_desc.irq_count == 0)
934 + return 0;
935 +
936 + irq_count = mc_dev->obj_desc.irq_count;
937 +
938 + mc_irq = kcalloc(irq_count, sizeof(*mc_irq), GFP_KERNEL);
939 + if (mc_irq == NULL)
940 + return -ENOMEM;
941 +
942 + /* Allocate IRQs */
943 + ret = fsl_mc_allocate_irqs(mc_dev);
944 + if (ret) {
945 + kfree(mc_irq);
946 + return ret;
947 + }
948 +
949 + for (i = 0; i < irq_count; i++) {
950 + mc_irq[i].count = 1;
951 + mc_irq[i].flags = VFIO_IRQ_INFO_EVENTFD;
952 + }
953 +
954 + vdev->mc_irqs = mc_irq;
955 +
956 + return 0;
957 +}
958 +
959 +/* Free All IRQs for the given MC object */
960 +void vfio_fsl_mc_irqs_cleanup(struct vfio_fsl_mc_device *vdev)
961 +{
962 + struct fsl_mc_device *mc_dev = vdev->mc_dev;
963 + int irq_count = mc_dev->obj_desc.irq_count;
964 + int i;
965 +
966 + /* Device does not support any interrupt */
967 + if (mc_dev->obj_desc.irq_count == 0)
968 + return;
969 +
970 + for (i = 0; i < irq_count; i++)
971 + vfio_set_trigger(vdev, i, -1);
972 +
973 + fsl_mc_free_irqs(mc_dev);
974 + kfree(vdev->mc_irqs);
975 +}
976 +
977 +static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev,
978 + unsigned int index, unsigned int start,
979 + unsigned int count, uint32_t flags,
980 + void *data)
981 +{
982 + struct vfio_fsl_mc_irq *irq = &vdev->mc_irqs[index];
983 + int hwirq;
984 +
985 + if (!count && (flags & VFIO_IRQ_SET_DATA_NONE))
986 + return vfio_set_trigger(vdev, index, -1);
987 +
988 + if (start != 0 || count != 1)
989 + return -EINVAL;
990 +
991 + if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
992 + int32_t fd = *(int32_t *)data;
993 +
994 + return vfio_set_trigger(vdev, index, fd);
995 + }
996 +
997 + hwirq = vdev->mc_dev->irqs[index]->msi_desc->irq;
998 +
999 + if (flags & VFIO_IRQ_SET_DATA_NONE) {
1000 + vfio_fsl_mc_irq_handler(hwirq, irq);
1001 +
1002 + } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
1003 + uint8_t trigger = *(uint8_t *)data;
1004 +
1005 + if (trigger)
1006 + vfio_fsl_mc_irq_handler(hwirq, irq);
1007 + }
1008 +
1009 + return 0;
1010 +}
1011 +
1012 +int vfio_fsl_mc_set_irqs_ioctl(struct vfio_fsl_mc_device *vdev,
1013 + uint32_t flags, unsigned int index,
1014 + unsigned int start, unsigned int count,
1015 + void *data)
1016 +{
1017 + int ret = -ENOTTY;
1018 +
1019 + switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
1020 + case VFIO_IRQ_SET_ACTION_MASK:
1021 + ret = vfio_fsl_mc_irq_mask(vdev, index, start, count,
1022 + flags, data);
1023 + break;
1024 + case VFIO_IRQ_SET_ACTION_UNMASK:
1025 + ret = vfio_fsl_mc_irq_unmask(vdev, index, start, count,
1026 + flags, data);
1027 + break;
1028 + case VFIO_IRQ_SET_ACTION_TRIGGER:
1029 + ret = vfio_fsl_mc_set_irq_trigger(vdev, index, start,
1030 + count, flags, data);
1031 + break;
1032 + }
1033 +
1034 + return ret;
1035 +}
1036 diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h b/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
1037 new file mode 100644
1038 index 00000000..34e75754
1039 --- /dev/null
1040 +++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
1041 @@ -0,0 +1,55 @@
1042 +/*
1043 + * Freescale Management Complex VFIO private declarations
1044 + *
1045 + * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
1046 + * Copyright 2016 NXP
1047 + * Author: Bharat Bhushan <bharat.bhushan@nxp.com>
1048 + *
1049 + * This file is licensed under the terms of the GNU General Public
1050 + * License version 2. This program is licensed "as is" without any
1051 + * warranty of any kind, whether express or implied.
1052 + */
1053 +
1054 +#ifndef VFIO_FSL_MC_PRIVATE_H
1055 +#define VFIO_FSL_MC_PRIVATE_H
1056 +
1057 +#define VFIO_FSL_MC_OFFSET_SHIFT 40
1058 +#define VFIO_FSL_MC_OFFSET_MASK (((u64)(1) << VFIO_FSL_MC_OFFSET_SHIFT) - 1)
1059 +
1060 +#define VFIO_FSL_MC_OFFSET_TO_INDEX(off) (off >> VFIO_FSL_MC_OFFSET_SHIFT)
1061 +
1062 +#define VFIO_FSL_MC_INDEX_TO_OFFSET(index) \
1063 + ((u64)(index) << VFIO_FSL_MC_OFFSET_SHIFT)
1064 +
1065 +struct vfio_fsl_mc_irq {
1066 + u32 flags;
1067 + u32 count;
1068 + struct eventfd_ctx *trigger;
1069 + char *name;
1070 +};
1071 +
1072 +struct vfio_fsl_mc_region {
1073 + u32 flags;
1074 +#define VFIO_FSL_MC_REGION_TYPE_MMIO 1
1075 +#define VFIO_FSL_MC_REGION_TYPE_CACHEABLE 2
1076 + u32 type;
1077 + u64 addr;
1078 + resource_size_t size;
1079 + void __iomem *ioaddr;
1080 +};
1081 +
1082 +struct vfio_fsl_mc_device {
1083 + struct fsl_mc_device *mc_dev;
1084 + int refcnt;
1085 + u32 num_regions;
1086 + struct vfio_fsl_mc_region *regions;
1087 + struct vfio_fsl_mc_irq *mc_irqs;
1088 +};
1089 +
1090 +int vfio_fsl_mc_irqs_init(struct vfio_fsl_mc_device *vdev);
1091 +void vfio_fsl_mc_irqs_cleanup(struct vfio_fsl_mc_device *vdev);
1092 +int vfio_fsl_mc_set_irqs_ioctl(struct vfio_fsl_mc_device *vdev,
1093 + uint32_t flags, unsigned int index,
1094 + unsigned int start, unsigned int count,
1095 + void *data);
1096 +#endif /* VFIO_FSL_MC_PRIVATE_H */
1097 diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
1098 index 1d48e62f..f0a39331 100644
1099 --- a/drivers/vfio/vfio_iommu_type1.c
1100 +++ b/drivers/vfio/vfio_iommu_type1.c
1101 @@ -36,6 +36,8 @@
1102 #include <linux/uaccess.h>
1103 #include <linux/vfio.h>
1104 #include <linux/workqueue.h>
1105 +#include <linux/dma-iommu.h>
1106 +#include <linux/irqdomain.h>
1107
1108 #define DRIVER_VERSION "0.2"
1109 #define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
1110 @@ -720,6 +722,27 @@ static void vfio_test_domain_fgsp(struct vfio_domain *domain)
1111 __free_pages(pages, order);
1112 }
1113
1114 +static bool vfio_iommu_has_sw_msi(struct iommu_group *group, phys_addr_t *base)
1115 +{
1116 + struct list_head group_resv_regions;
1117 + struct iommu_resv_region *region, *next;
1118 + bool ret = false;
1119 +
1120 + INIT_LIST_HEAD(&group_resv_regions);
1121 + iommu_get_group_resv_regions(group, &group_resv_regions);
1122 + list_for_each_entry(region, &group_resv_regions, list) {
1123 + if (region->type == IOMMU_RESV_SW_MSI) {
1124 + *base = region->start;
1125 + ret = true;
1126 + goto out;
1127 + }
1128 + }
1129 +out:
1130 + list_for_each_entry_safe(region, next, &group_resv_regions, list)
1131 + kfree(region);
1132 + return ret;
1133 +}
1134 +
1135 static int vfio_iommu_type1_attach_group(void *iommu_data,
1136 struct iommu_group *iommu_group)
1137 {
1138 @@ -728,6 +751,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
1139 struct vfio_domain *domain, *d;
1140 struct bus_type *bus = NULL;
1141 int ret;
1142 + bool resv_msi, msi_remap;
1143 + phys_addr_t resv_msi_base;
1144
1145 mutex_lock(&iommu->lock);
1146
1147 @@ -774,11 +799,15 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
1148 if (ret)
1149 goto out_domain;
1150
1151 + resv_msi = vfio_iommu_has_sw_msi(iommu_group, &resv_msi_base);
1152 +
1153 INIT_LIST_HEAD(&domain->group_list);
1154 list_add(&group->next, &domain->group_list);
1155
1156 - if (!allow_unsafe_interrupts &&
1157 - !iommu_capable(bus, IOMMU_CAP_INTR_REMAP)) {
1158 + msi_remap = resv_msi ? irq_domain_check_msi_remap() :
1159 + iommu_capable(bus, IOMMU_CAP_INTR_REMAP);
1160 +
1161 + if (!allow_unsafe_interrupts && !msi_remap) {
1162 pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
1163 __func__);
1164 ret = -EPERM;
1165 @@ -820,6 +849,12 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
1166 if (ret)
1167 goto out_detach;
1168
1169 + if (resv_msi) {
1170 + ret = iommu_get_msi_cookie(domain->domain, resv_msi_base);
1171 + if (ret)
1172 + goto out_detach;
1173 + }
1174 +
1175 list_add(&domain->next, &iommu->domain_list);
1176
1177 mutex_unlock(&iommu->lock);
1178 diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
1179 index 255a2113..1bbaa13d 100644
1180 --- a/include/uapi/linux/vfio.h
1181 +++ b/include/uapi/linux/vfio.h
1182 @@ -198,6 +198,7 @@ struct vfio_device_info {
1183 #define VFIO_DEVICE_FLAGS_PCI (1 << 1) /* vfio-pci device */
1184 #define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2) /* vfio-platform device */
1185 #define VFIO_DEVICE_FLAGS_AMBA (1 << 3) /* vfio-amba device */
1186 +#define VFIO_DEVICE_FLAGS_FSL_MC (1 << 5) /* vfio-fsl-mc device */
1187 __u32 num_regions; /* Max region index + 1 */
1188 __u32 num_irqs; /* Max IRQ index + 1 */
1189 };
1190 --
1191 2.14.1
1192
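For reference, below is a minimal user-space sketch (not part of the patch) of how a device bound to this meta-driver would typically be consumed through the standard VFIO ioctls. The group number "27" and the object name "dprc.2" are placeholders, not values taken from this patch; error handling is omitted for brevity.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

int main(void)
{
	/* Open the VFIO container and the IOMMU group (group number is illustrative) */
	int container = open("/dev/vfio/vfio", O_RDWR);
	int group = open("/dev/vfio/27", O_RDWR);
	struct vfio_group_status status = { .argsz = sizeof(status) };
	struct vfio_device_info info = { .argsz = sizeof(info) };
	int device;

	ioctl(group, VFIO_GROUP_GET_STATUS, &status);
	if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE))
		return 1;

	/* Attach the group to the container, then enable the type1 IOMMU backend */
	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);

	/* "dprc.2" is a placeholder fsl-mc object name */
	device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "dprc.2");
	ioctl(device, VFIO_DEVICE_GET_INFO, &info);

	/* VFIO_DEVICE_FLAGS_FSL_MC is the flag added to vfio.h by this patch */
	if (info.flags & VFIO_DEVICE_FLAGS_FSL_MC)
		printf("fsl-mc device: %u regions, %u irqs\n",
		       info.num_regions, info.num_irqs);
	return 0;
}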