Diffstat (limited to 'drivers')
96 files changed, 7184 insertions, 801 deletions
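Note on the drivers/fpga/ hunks below: most of them convert drivers from the old one-shot fpga_mgr_register(dev, name, ops, priv) call to the new fpga_mgr_create()/fpga_mgr_register() pair, with fpga_mgr_unregister()/fpga_mgr_free() on the teardown side, as the altera-ps-spi and ice40-spi changes show. A minimal sketch of that pattern for a hypothetical SPI-attached manager follows; the driver name, compatible string, and stub ops are illustrative only and are not part of this patch.

#include <linux/fpga/fpga-mgr.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/spi/spi.h>

/* Stub ops: a real driver fills these with hardware-specific code. */
static enum fpga_mgr_states example_state(struct fpga_manager *mgr)
{
	return FPGA_MGR_STATE_UNKNOWN;
}

static int example_write_init(struct fpga_manager *mgr,
			      struct fpga_image_info *info,
			      const char *buf, size_t count)
{
	return 0;
}

static int example_write(struct fpga_manager *mgr, const char *buf,
			 size_t count)
{
	return 0;
}

static int example_write_complete(struct fpga_manager *mgr,
				  struct fpga_image_info *info)
{
	return 0;
}

static const struct fpga_manager_ops example_ops = {
	.state		= example_state,
	.write_init	= example_write_init,
	.write		= example_write,
	.write_complete	= example_write_complete,
};

static int example_fpga_probe(struct spi_device *spi)
{
	struct fpga_manager *mgr;
	int ret;

	/* Allocate and initialize the manager; nothing is published yet. */
	mgr = fpga_mgr_create(&spi->dev, "Example SPI FPGA Manager",
			      &example_ops, spi);
	if (!mgr)
		return -ENOMEM;

	spi_set_drvdata(spi, mgr);

	/* Registration makes the device visible; free by hand on failure. */
	ret = fpga_mgr_register(mgr);
	if (ret)
		fpga_mgr_free(mgr);

	return ret;
}

static int example_fpga_remove(struct spi_device *spi)
{
	struct fpga_manager *mgr = spi_get_drvdata(spi);

	/* Unregister; the manager is freed through the device release path. */
	fpga_mgr_unregister(mgr);

	return 0;
}

static const struct of_device_id example_of_match[] = {
	{ .compatible = "example,fpga-mgr-spi" },	/* hypothetical binding */
	{ }
};
MODULE_DEVICE_TABLE(of, example_of_match);

static struct spi_driver example_fpga_driver = {
	.driver = {
		.name = "example-fpga-mgr",
		.of_match_table = example_of_match,
	},
	.probe = example_fpga_probe,
	.remove = example_fpga_remove,
};
module_spi_driver(example_fpga_driver);

MODULE_DESCRIPTION("Sketch of the fpga_mgr_create()/fpga_mgr_register() pattern");
MODULE_LICENSE("GPL");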
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index 0cf51d57e4ae..3ece711a6a17 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -102,8 +102,8 @@ static ssize_t driver_override_store(struct device *_dev, if (strlen(driver_override)) { dev->driver_override = driver_override; } else { - kfree(driver_override); - dev->driver_override = NULL; + kfree(driver_override); + dev->driver_override = NULL; } device_unlock(_dev); diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig index 7dce3795b887..ee4880bfdcdc 100644 --- a/drivers/android/Kconfig +++ b/drivers/android/Kconfig @@ -10,7 +10,7 @@ if ANDROID config ANDROID_BINDER_IPC bool "Android Binder IPC Driver" - depends on MMU + depends on MMU && !M68K default n ---help--- Binder is used in Android for both communication between processes, @@ -32,19 +32,6 @@ config ANDROID_BINDER_DEVICES created. Each binder device has its own context manager, and is therefore logically separated from the other devices. -config ANDROID_BINDER_IPC_32BIT - bool "Use old (Android 4.4 and earlier) 32-bit binder API" - depends on !64BIT && ANDROID_BINDER_IPC - default y - ---help--- - The Binder API has been changed to support both 32 and 64bit - applications in a mixed environment. - - Enable this to support an old 32-bit Android user-space (v4.4 and - earlier). - - Note that enabling this will break newer Android user-space. - config ANDROID_BINDER_IPC_SELFTEST bool "Android Binder IPC Driver Selftest" depends on ANDROID_BINDER_IPC diff --git a/drivers/android/binder.c b/drivers/android/binder.c index e578eee31589..95283f3bb51c 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -72,10 +72,6 @@ #include <linux/security.h> #include <linux/spinlock.h> -#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT -#define BINDER_IPC_32BIT 1 -#endif - #include <uapi/linux/android/binder.h> #include "binder_alloc.h" #include "binder_trace.h" @@ -2058,8 +2054,8 @@ static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset) struct binder_object_header *hdr; size_t object_size = 0; - if (offset > buffer->data_size - sizeof(*hdr) || - buffer->data_size < sizeof(*hdr) || + if (buffer->data_size < sizeof(*hdr) || + offset > buffer->data_size - sizeof(*hdr) || !IS_ALIGNED(offset, sizeof(u32))) return 0; @@ -3925,10 +3921,11 @@ retry: binder_inner_proc_unlock(proc); if (put_user(e->cmd, (uint32_t __user *)ptr)) return -EFAULT; + cmd = e->cmd; e->cmd = BR_OK; ptr += sizeof(uint32_t); - binder_stat_br(proc, thread, e->cmd); + binder_stat_br(proc, thread, cmd); } break; case BINDER_WORK_TRANSACTION_COMPLETE: { binder_inner_proc_unlock(proc); @@ -4696,7 +4693,7 @@ static void binder_vma_close(struct vm_area_struct *vma) binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); } -static int binder_vm_fault(struct vm_fault *vmf) +static vm_fault_t binder_vm_fault(struct vm_fault *vmf) { return VM_FAULT_SIGBUS; } @@ -4730,7 +4727,9 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) failure_string = "bad vm_flags"; goto err_bad_arg; } - vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; + vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP; + vma->vm_flags &= ~VM_MAYWRITE; + vma->vm_ops = &binder_vm_ops; vma->vm_private_data = proc; diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 5a426c877dfb..4f382d51def1 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -219,7 +219,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, mm = 
alloc->vma_vm_mm; if (mm) { - down_write(&mm->mmap_sem); + down_read(&mm->mmap_sem); vma = alloc->vma; } @@ -288,7 +288,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, /* vm_insert_page does not seem to increment the refcount */ } if (mm) { - up_write(&mm->mmap_sem); + up_read(&mm->mmap_sem); mmput(mm); } return 0; @@ -321,7 +321,7 @@ err_page_ptr_cleared: } err_no_vma: if (mm) { - up_write(&mm->mmap_sem); + up_read(&mm->mmap_sem); mmput(mm); } return vma ? -ENOMEM : -ESRCH; diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c index 7b75669d3670..058876b55b09 100644 --- a/drivers/char/mspec.c +++ b/drivers/char/mspec.c @@ -191,7 +191,7 @@ mspec_close(struct vm_area_struct *vma) * * Creates a mspec page and maps it to user space. */ -static int +static vm_fault_t mspec_fault(struct vm_fault *vmf) { unsigned long paddr, maddr; @@ -223,14 +223,7 @@ mspec_fault(struct vm_fault *vmf) pfn = paddr >> PAGE_SHIFT; - /* - * vm_insert_pfn can fail with -EBUSY, but in that case it will - * be because another thread has installed the pte first, so it - * is no problem. - */ - vm_insert_pfn(vmf->vma, vmf->address, pfn); - - return VM_FAULT_NOPAGE; + return vmf_insert_pfn(vmf->vma, vmf->address, pfn); } static const struct vm_operations_struct mspec_vm_ops = { diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig index f16b381a569c..a456a000048b 100644 --- a/drivers/firmware/google/Kconfig +++ b/drivers/firmware/google/Kconfig @@ -55,6 +55,14 @@ config GOOGLE_MEMCONSOLE_X86_LEGACY the EBDA on Google servers. If found, this log is exported to userland in the file /sys/firmware/log. +config GOOGLE_FRAMEBUFFER_COREBOOT + tristate "Coreboot Framebuffer" + depends on FB_SIMPLE + depends on GOOGLE_COREBOOT_TABLE + help + This option enables the kernel to search for a framebuffer in + the coreboot table. If found, it is registered with simplefb. 
+ config GOOGLE_MEMCONSOLE_COREBOOT tristate "Firmware Memory Console" depends on GOOGLE_COREBOOT_TABLE diff --git a/drivers/firmware/google/Makefile b/drivers/firmware/google/Makefile index dcd3675efcfc..d0b3fba96194 100644 --- a/drivers/firmware/google/Makefile +++ b/drivers/firmware/google/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_GOOGLE_SMI) += gsmi.o obj-$(CONFIG_GOOGLE_COREBOOT_TABLE) += coreboot_table.o obj-$(CONFIG_GOOGLE_COREBOOT_TABLE_ACPI) += coreboot_table-acpi.o obj-$(CONFIG_GOOGLE_COREBOOT_TABLE_OF) += coreboot_table-of.o +obj-$(CONFIG_GOOGLE_FRAMEBUFFER_COREBOOT) += framebuffer-coreboot.o obj-$(CONFIG_GOOGLE_MEMCONSOLE) += memconsole.o obj-$(CONFIG_GOOGLE_MEMCONSOLE_COREBOOT) += memconsole-coreboot.o obj-$(CONFIG_GOOGLE_MEMCONSOLE_X86_LEGACY) += memconsole-x86-legacy.o diff --git a/drivers/firmware/google/coreboot_table-acpi.c b/drivers/firmware/google/coreboot_table-acpi.c index fb98db2d20e2..77197fe3d42f 100644 --- a/drivers/firmware/google/coreboot_table-acpi.c +++ b/drivers/firmware/google/coreboot_table-acpi.c @@ -53,7 +53,7 @@ static int coreboot_table_acpi_probe(struct platform_device *pdev) if (!ptr) return -ENOMEM; - return coreboot_table_init(ptr); + return coreboot_table_init(&pdev->dev, ptr); } static int coreboot_table_acpi_remove(struct platform_device *pdev) diff --git a/drivers/firmware/google/coreboot_table-of.c b/drivers/firmware/google/coreboot_table-of.c index 727acdc83e83..f15bf404c579 100644 --- a/drivers/firmware/google/coreboot_table-of.c +++ b/drivers/firmware/google/coreboot_table-of.c @@ -34,7 +34,7 @@ static int coreboot_table_of_probe(struct platform_device *pdev) if (!ptr) return -ENOMEM; - return coreboot_table_init(ptr); + return coreboot_table_init(&pdev->dev, ptr); } static int coreboot_table_of_remove(struct platform_device *pdev) diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c index 0019d3ec18dd..19db5709ae28 100644 --- a/drivers/firmware/google/coreboot_table.c +++ b/drivers/firmware/google/coreboot_table.c @@ -4,6 +4,7 @@ * Module providing coreboot table access. * * Copyright 2017 Google Inc. + * Copyright 2017 Samuel Holland <samuel@sholland.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License v2.0 as published by @@ -15,37 +16,96 @@ * GNU General Public License for more details. */ +#include <linux/device.h> #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/slab.h> #include "coreboot_table.h" -struct coreboot_table_entry { - u32 tag; - u32 size; -}; +#define CB_DEV(d) container_of(d, struct coreboot_device, dev) +#define CB_DRV(d) container_of(d, struct coreboot_driver, drv) static struct coreboot_table_header __iomem *ptr_header; -/* - * This function parses the coreboot table for an entry that contains the base - * address of the given entry tag. The coreboot table consists of a header - * directly followed by a number of small, variable-sized entries, which each - * contain an identifying tag and their length as the first two fields. 
- */ -int coreboot_table_find(int tag, void *data, size_t data_size) +static int coreboot_bus_match(struct device *dev, struct device_driver *drv) { - struct coreboot_table_header header; - struct coreboot_table_entry entry; - void *ptr_entry; - int i; + struct coreboot_device *device = CB_DEV(dev); + struct coreboot_driver *driver = CB_DRV(drv); - if (!ptr_header) - return -EPROBE_DEFER; + return device->entry.tag == driver->tag; +} +static int coreboot_bus_probe(struct device *dev) +{ + int ret = -ENODEV; + struct coreboot_device *device = CB_DEV(dev); + struct coreboot_driver *driver = CB_DRV(dev->driver); + + if (driver->probe) + ret = driver->probe(device); + + return ret; +} + +static int coreboot_bus_remove(struct device *dev) +{ + int ret = 0; + struct coreboot_device *device = CB_DEV(dev); + struct coreboot_driver *driver = CB_DRV(dev->driver); + + if (driver->remove) + ret = driver->remove(device); + + return ret; +} + +static struct bus_type coreboot_bus_type = { + .name = "coreboot", + .match = coreboot_bus_match, + .probe = coreboot_bus_probe, + .remove = coreboot_bus_remove, +}; + +static int __init coreboot_bus_init(void) +{ + return bus_register(&coreboot_bus_type); +} +module_init(coreboot_bus_init); + +static void coreboot_device_release(struct device *dev) +{ + struct coreboot_device *device = CB_DEV(dev); + + kfree(device); +} + +int coreboot_driver_register(struct coreboot_driver *driver) +{ + driver->drv.bus = &coreboot_bus_type; + + return driver_register(&driver->drv); +} +EXPORT_SYMBOL(coreboot_driver_register); + +void coreboot_driver_unregister(struct coreboot_driver *driver) +{ + driver_unregister(&driver->drv); +} +EXPORT_SYMBOL(coreboot_driver_unregister); + +int coreboot_table_init(struct device *dev, void __iomem *ptr) +{ + int i, ret; + void *ptr_entry; + struct coreboot_device *device; + struct coreboot_table_entry entry; + struct coreboot_table_header header; + + ptr_header = ptr; memcpy_fromio(&header, ptr_header, sizeof(header)); if (strncmp(header.signature, "LBIO", sizeof(header.signature))) { @@ -54,37 +114,41 @@ int coreboot_table_find(int tag, void *data, size_t data_size) } ptr_entry = (void *)ptr_header + header.header_bytes; - for (i = 0; i < header.table_entries; i++) { memcpy_fromio(&entry, ptr_entry, sizeof(entry)); - if (entry.tag == tag) { - if (data_size < entry.size) - return -EINVAL; - memcpy_fromio(data, ptr_entry, entry.size); + device = kzalloc(sizeof(struct device) + entry.size, GFP_KERNEL); + if (!device) { + ret = -ENOMEM; + break; + } + + dev_set_name(&device->dev, "coreboot%d", i); + device->dev.parent = dev; + device->dev.bus = &coreboot_bus_type; + device->dev.release = coreboot_device_release; + memcpy_fromio(&device->entry, ptr_entry, entry.size); - return 0; + ret = device_register(&device->dev); + if (ret) { + put_device(&device->dev); + break; } ptr_entry += entry.size; } - return -ENOENT; -} -EXPORT_SYMBOL(coreboot_table_find); - -int coreboot_table_init(void __iomem *ptr) -{ - ptr_header = ptr; - - return 0; + return ret; } EXPORT_SYMBOL(coreboot_table_init); int coreboot_table_exit(void) { - if (ptr_header) + if (ptr_header) { + bus_unregister(&coreboot_bus_type); iounmap(ptr_header); + ptr_header = NULL; + } return 0; } diff --git a/drivers/firmware/google/coreboot_table.h b/drivers/firmware/google/coreboot_table.h index 6eff1ae0c5d3..8ad95a94481b 100644 --- a/drivers/firmware/google/coreboot_table.h +++ b/drivers/firmware/google/coreboot_table.h @@ -3,7 +3,9 @@ * * Internal header for coreboot table access. 
* + * Copyright 2014 Gerd Hoffmann <kraxel@redhat.com> * Copyright 2017 Google Inc. + * Copyright 2017 Samuel Holland <samuel@sholland.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License v2.0 as published by @@ -20,14 +22,6 @@ #include <linux/io.h> -/* List of coreboot entry structures that is used */ -struct lb_cbmem_ref { - uint32_t tag; - uint32_t size; - - uint64_t cbmem_addr; -}; - /* Coreboot table header structure */ struct coreboot_table_header { char signature[4]; @@ -38,11 +32,67 @@ struct coreboot_table_header { u32 table_entries; }; -/* Retrieve coreboot table entry with tag *tag* and copy it to data */ -int coreboot_table_find(int tag, void *data, size_t data_size); +/* List of coreboot entry structures that is used */ +/* Generic */ +struct coreboot_table_entry { + u32 tag; + u32 size; +}; + +/* Points to a CBMEM entry */ +struct lb_cbmem_ref { + u32 tag; + u32 size; + + u64 cbmem_addr; +}; + +/* Describes framebuffer setup by coreboot */ +struct lb_framebuffer { + u32 tag; + u32 size; + + u64 physical_address; + u32 x_resolution; + u32 y_resolution; + u32 bytes_per_line; + u8 bits_per_pixel; + u8 red_mask_pos; + u8 red_mask_size; + u8 green_mask_pos; + u8 green_mask_size; + u8 blue_mask_pos; + u8 blue_mask_size; + u8 reserved_mask_pos; + u8 reserved_mask_size; +}; + +/* A device, additionally with information from coreboot. */ +struct coreboot_device { + struct device dev; + union { + struct coreboot_table_entry entry; + struct lb_cbmem_ref cbmem_ref; + struct lb_framebuffer framebuffer; + }; +}; + +/* A driver for handling devices described in coreboot tables. */ +struct coreboot_driver { + int (*probe)(struct coreboot_device *); + int (*remove)(struct coreboot_device *); + struct device_driver drv; + u32 tag; +}; + +/* Register a driver that uses the data from a coreboot table. */ +int coreboot_driver_register(struct coreboot_driver *driver); + +/* Unregister a driver that uses the data from a coreboot table. */ +void coreboot_driver_unregister(struct coreboot_driver *driver); /* Initialize coreboot table module given a pointer to iomem */ -int coreboot_table_init(void __iomem *ptr); +int coreboot_table_init(struct device *dev, void __iomem *ptr); /* Cleanup coreboot table module */ int coreboot_table_exit(void); diff --git a/drivers/firmware/google/framebuffer-coreboot.c b/drivers/firmware/google/framebuffer-coreboot.c new file mode 100644 index 000000000000..b8b49c067157 --- /dev/null +++ b/drivers/firmware/google/framebuffer-coreboot.c @@ -0,0 +1,115 @@ +/* + * framebuffer-coreboot.c + * + * Memory based framebuffer accessed through coreboot table. + * + * Copyright 2012-2013 David Herrmann <dh.herrmann@gmail.com> + * Copyright 2017 Google Inc. + * Copyright 2017 Samuel Holland <samuel@sholland.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License v2.0 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/platform_data/simplefb.h> +#include <linux/platform_device.h> + +#include "coreboot_table.h" + +#define CB_TAG_FRAMEBUFFER 0x12 + +static const struct simplefb_format formats[] = SIMPLEFB_FORMATS; + +static int framebuffer_probe(struct coreboot_device *dev) +{ + int i; + u32 length; + struct lb_framebuffer *fb = &dev->framebuffer; + struct platform_device *pdev; + struct resource res; + struct simplefb_platform_data pdata = { + .width = fb->x_resolution, + .height = fb->y_resolution, + .stride = fb->bytes_per_line, + .format = NULL, + }; + + for (i = 0; i < ARRAY_SIZE(formats); ++i) { + if (fb->bits_per_pixel == formats[i].bits_per_pixel && + fb->red_mask_pos == formats[i].red.offset && + fb->red_mask_size == formats[i].red.length && + fb->green_mask_pos == formats[i].green.offset && + fb->green_mask_size == formats[i].green.length && + fb->blue_mask_pos == formats[i].blue.offset && + fb->blue_mask_size == formats[i].blue.length && + fb->reserved_mask_pos == formats[i].transp.offset && + fb->reserved_mask_size == formats[i].transp.length) + pdata.format = formats[i].name; + } + if (!pdata.format) + return -ENODEV; + + memset(&res, 0, sizeof(res)); + res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; + res.name = "Coreboot Framebuffer"; + res.start = fb->physical_address; + length = PAGE_ALIGN(fb->y_resolution * fb->bytes_per_line); + res.end = res.start + length - 1; + if (res.end <= res.start) + return -EINVAL; + + pdev = platform_device_register_resndata(&dev->dev, + "simple-framebuffer", 0, + &res, 1, &pdata, + sizeof(pdata)); + if (IS_ERR(pdev)) + pr_warn("coreboot: could not register framebuffer\n"); + else + dev_set_drvdata(&dev->dev, pdev); + + return PTR_ERR_OR_ZERO(pdev); +} + +static int framebuffer_remove(struct coreboot_device *dev) +{ + struct platform_device *pdev = dev_get_drvdata(&dev->dev); + + platform_device_unregister(pdev); + + return 0; +} + +static struct coreboot_driver framebuffer_driver = { + .probe = framebuffer_probe, + .remove = framebuffer_remove, + .drv = { + .name = "framebuffer", + }, + .tag = CB_TAG_FRAMEBUFFER, +}; + +static int __init coreboot_framebuffer_init(void) +{ + return coreboot_driver_register(&framebuffer_driver); +} + +static void coreboot_framebuffer_exit(void) +{ + coreboot_driver_unregister(&framebuffer_driver); +} + +module_init(coreboot_framebuffer_init); +module_exit(coreboot_framebuffer_exit); + +MODULE_AUTHOR("Samuel Holland <samuel@sholland.org>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/google/memconsole-coreboot.c b/drivers/firmware/google/memconsole-coreboot.c index 52738887735c..b29e10757bfb 100644 --- a/drivers/firmware/google/memconsole-coreboot.c +++ b/drivers/firmware/google/memconsole-coreboot.c @@ -15,9 +15,9 @@ * GNU General Public License for more details. 
*/ +#include <linux/device.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/platform_device.h> #include "memconsole.h" #include "coreboot_table.h" @@ -73,18 +73,19 @@ static ssize_t memconsole_coreboot_read(char *buf, loff_t pos, size_t count) return done; } -static int memconsole_coreboot_init(phys_addr_t physaddr) +static int memconsole_probe(struct coreboot_device *dev) { struct cbmem_cons __iomem *tmp_cbmc; - tmp_cbmc = memremap(physaddr, sizeof(*tmp_cbmc), MEMREMAP_WB); + tmp_cbmc = memremap(dev->cbmem_ref.cbmem_addr, + sizeof(*tmp_cbmc), MEMREMAP_WB); if (!tmp_cbmc) return -ENOMEM; /* Read size only once to prevent overrun attack through /dev/mem. */ cbmem_console_size = tmp_cbmc->size_dont_access_after_boot; - cbmem_console = memremap(physaddr, + cbmem_console = memremap(dev->cbmem_ref.cbmem_addr, cbmem_console_size + sizeof(*cbmem_console), MEMREMAP_WB); memunmap(tmp_cbmc); @@ -93,26 +94,11 @@ static int memconsole_coreboot_init(phys_addr_t physaddr) return -ENOMEM; memconsole_setup(memconsole_coreboot_read); - return 0; -} - -static int memconsole_probe(struct platform_device *pdev) -{ - int ret; - struct lb_cbmem_ref entry; - - ret = coreboot_table_find(CB_TAG_CBMEM_CONSOLE, &entry, sizeof(entry)); - if (ret) - return ret; - - ret = memconsole_coreboot_init(entry.cbmem_addr); - if (ret) - return ret; return memconsole_sysfs_init(); } -static int memconsole_remove(struct platform_device *pdev) +static int memconsole_remove(struct coreboot_device *dev) { memconsole_exit(); @@ -122,28 +108,27 @@ static int memconsole_remove(struct platform_device *pdev) return 0; } -static struct platform_driver memconsole_driver = { +static struct coreboot_driver memconsole_driver = { .probe = memconsole_probe, .remove = memconsole_remove, - .driver = { + .drv = { .name = "memconsole", }, + .tag = CB_TAG_CBMEM_CONSOLE, }; -static int __init platform_memconsole_init(void) +static void coreboot_memconsole_exit(void) { - struct platform_device *pdev; - - pdev = platform_device_register_simple("memconsole", -1, NULL, 0); - if (IS_ERR(pdev)) - return PTR_ERR(pdev); - - platform_driver_register(&memconsole_driver); + coreboot_driver_unregister(&memconsole_driver); +} - return 0; +static int __init coreboot_memconsole_init(void) +{ + return coreboot_driver_register(&memconsole_driver); } -module_init(platform_memconsole_init); +module_exit(coreboot_memconsole_exit); +module_init(coreboot_memconsole_init); MODULE_AUTHOR("Google, Inc."); MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c index e4b40f2b4627..e9db895916c3 100644 --- a/drivers/firmware/google/vpd.c +++ b/drivers/firmware/google/vpd.c @@ -286,20 +286,15 @@ static int vpd_sections_init(phys_addr_t physaddr) return 0; } -static int vpd_probe(struct platform_device *pdev) +static int vpd_probe(struct coreboot_device *dev) { int ret; - struct lb_cbmem_ref entry; - - ret = coreboot_table_find(CB_TAG_VPD, &entry, sizeof(entry)); - if (ret) - return ret; vpd_kobj = kobject_create_and_add("vpd", firmware_kobj); if (!vpd_kobj) return -ENOMEM; - ret = vpd_sections_init(entry.cbmem_addr); + ret = vpd_sections_init(dev->cbmem_ref.cbmem_addr); if (ret) { kobject_put(vpd_kobj); return ret; @@ -308,7 +303,7 @@ static int vpd_probe(struct platform_device *pdev) return 0; } -static int vpd_remove(struct platform_device *pdev) +static int vpd_remove(struct coreboot_device *dev) { vpd_section_destroy(&ro_vpd); vpd_section_destroy(&rw_vpd); @@ -318,41 +313,27 @@ static int vpd_remove(struct 
platform_device *pdev) return 0; } -static struct platform_driver vpd_driver = { +static struct coreboot_driver vpd_driver = { .probe = vpd_probe, .remove = vpd_remove, - .driver = { + .drv = { .name = "vpd", }, + .tag = CB_TAG_VPD, }; -static struct platform_device *vpd_pdev; - -static int __init vpd_platform_init(void) +static int __init coreboot_vpd_init(void) { - int ret; - - ret = platform_driver_register(&vpd_driver); - if (ret) - return ret; - - vpd_pdev = platform_device_register_simple("vpd", -1, NULL, 0); - if (IS_ERR(vpd_pdev)) { - platform_driver_unregister(&vpd_driver); - return PTR_ERR(vpd_pdev); - } - - return 0; + return coreboot_driver_register(&vpd_driver); } -static void __exit vpd_platform_exit(void) +static void __exit coreboot_vpd_exit(void) { - platform_device_unregister(vpd_pdev); - platform_driver_unregister(&vpd_driver); + coreboot_driver_unregister(&vpd_driver); } -module_init(vpd_platform_init); -module_exit(vpd_platform_exit); +module_init(coreboot_vpd_init); +module_exit(coreboot_vpd_exit); MODULE_AUTHOR("Google, Inc."); MODULE_LICENSE("GPL"); diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig index f47ef848bcd0..ee9c5420c47f 100644 --- a/drivers/fpga/Kconfig +++ b/drivers/fpga/Kconfig @@ -53,7 +53,6 @@ config FPGA_MGR_ALTERA_CVP config FPGA_MGR_ZYNQ_FPGA tristate "Xilinx Zynq FPGA" depends on ARCH_ZYNQ || COMPILE_TEST - depends on HAS_DMA help FPGA manager driver support for Xilinx Zynq FPGAs. @@ -70,6 +69,13 @@ config FPGA_MGR_ICE40_SPI help FPGA manager driver support for Lattice iCE40 FPGAs over SPI. +config FPGA_MGR_MACHXO2_SPI + tristate "Lattice MachXO2 SPI" + depends on SPI + help + FPGA manager driver support for Lattice MachXO2 configuration + over slave SPI interface. + config FPGA_MGR_TS73XX tristate "Technologic Systems TS-73xx SBC FPGA Manager" depends on ARCH_EP93XX && MACH_TS72XX diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile index 3cb276a0f88d..f9803dad6919 100644 --- a/drivers/fpga/Makefile +++ b/drivers/fpga/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_FPGA) += fpga-mgr.o obj-$(CONFIG_FPGA_MGR_ALTERA_CVP) += altera-cvp.o obj-$(CONFIG_FPGA_MGR_ALTERA_PS_SPI) += altera-ps-spi.o obj-$(CONFIG_FPGA_MGR_ICE40_SPI) += ice40-spi.o +obj-$(CONFIG_FPGA_MGR_MACHXO2_SPI) += machxo2-spi.o obj-$(CONFIG_FPGA_MGR_SOCFPGA) += socfpga.o obj-$(CONFIG_FPGA_MGR_SOCFPGA_A10) += socfpga-a10.o obj-$(CONFIG_FPGA_MGR_TS73XX) += ts73xx-fpga.o diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c index 77b04e4b3254..dd4edd8f22ce 100644 --- a/drivers/fpga/altera-cvp.c +++ b/drivers/fpga/altera-cvp.c @@ -401,6 +401,7 @@ static int altera_cvp_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) { struct altera_cvp_conf *conf; + struct fpga_manager *mgr; u16 cmd, val; int ret; @@ -452,16 +453,24 @@ static int altera_cvp_probe(struct pci_dev *pdev, snprintf(conf->mgr_name, sizeof(conf->mgr_name), "%s @%s", ALTERA_CVP_MGR_NAME, pci_name(pdev)); - ret = fpga_mgr_register(&pdev->dev, conf->mgr_name, - &altera_cvp_ops, conf); - if (ret) + mgr = fpga_mgr_create(&pdev->dev, conf->mgr_name, + &altera_cvp_ops, conf); + if (!mgr) + return -ENOMEM; + + pci_set_drvdata(pdev, mgr); + + ret = fpga_mgr_register(mgr); + if (ret) { + fpga_mgr_free(mgr); goto err_unmap; + } ret = driver_create_file(&altera_cvp_driver.driver, &driver_attr_chkcfg); if (ret) { dev_err(&pdev->dev, "Can't create sysfs chkcfg file\n"); - fpga_mgr_unregister(&pdev->dev); + fpga_mgr_unregister(mgr); goto err_unmap; } @@ -483,7 +492,7 @@ static void altera_cvp_remove(struct 
pci_dev *pdev) u16 cmd; driver_remove_file(&altera_cvp_driver.driver, &driver_attr_chkcfg); - fpga_mgr_unregister(&pdev->dev); + fpga_mgr_unregister(mgr); pci_iounmap(pdev, conf->map); pci_release_region(pdev, CVP_BAR); pci_read_config_word(pdev, PCI_COMMAND, &cmd); diff --git a/drivers/fpga/altera-fpga2sdram.c b/drivers/fpga/altera-fpga2sdram.c index d4eeb74388da..23660ccd634b 100644 --- a/drivers/fpga/altera-fpga2sdram.c +++ b/drivers/fpga/altera-fpga2sdram.c @@ -1,19 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 /* * FPGA to SDRAM Bridge Driver for Altera SoCFPGA Devices * * Copyright (C) 2013-2016 Altera Corporation, All Rights Reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. */ /* @@ -106,6 +95,7 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct alt_fpga2sdram_data *priv; + struct fpga_bridge *br; u32 enable; struct regmap *sysmgr; int ret = 0; @@ -131,10 +121,18 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev) /* Get f2s bridge configuration saved in handoff register */ regmap_read(sysmgr, SYSMGR_ISWGRP_HANDOFF3, &priv->mask); - ret = fpga_bridge_register(dev, F2S_BRIDGE_NAME, - &altera_fpga2sdram_br_ops, priv); - if (ret) + br = fpga_bridge_create(dev, F2S_BRIDGE_NAME, + &altera_fpga2sdram_br_ops, priv); + if (!br) + return -ENOMEM; + + platform_set_drvdata(pdev, br); + + ret = fpga_bridge_register(br); + if (ret) { + fpga_bridge_free(br); return ret; + } dev_info(dev, "driver initialized with handoff %08x\n", priv->mask); @@ -146,7 +144,7 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev) (enable ? "enabling" : "disabling")); ret = _alt_fpga2sdram_enable_set(priv, enable); if (ret) { - fpga_bridge_unregister(&pdev->dev); + fpga_bridge_unregister(br); return ret; } } @@ -157,7 +155,9 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev) static int alt_fpga_bridge_remove(struct platform_device *pdev) { - fpga_bridge_unregister(&pdev->dev); + struct fpga_bridge *br = platform_get_drvdata(pdev); + + fpga_bridge_unregister(br); return 0; } diff --git a/drivers/fpga/altera-freeze-bridge.c b/drivers/fpga/altera-freeze-bridge.c index 6159cfcf78a2..ffd586c48ecf 100644 --- a/drivers/fpga/altera-freeze-bridge.c +++ b/drivers/fpga/altera-freeze-bridge.c @@ -1,19 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 /* * FPGA Freeze Bridge Controller * * Copyright (C) 2016 Altera Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/delay.h> #include <linux/io.h> @@ -221,8 +210,10 @@ static int altera_freeze_br_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; void __iomem *base_addr; struct altera_freeze_br_data *priv; + struct fpga_bridge *br; struct resource *res; u32 status, revision; + int ret; if (!np) return -ENODEV; @@ -254,13 +245,27 @@ static int altera_freeze_br_probe(struct platform_device *pdev) priv->base_addr = base_addr; - return fpga_bridge_register(dev, FREEZE_BRIDGE_NAME, - &altera_freeze_br_br_ops, priv); + br = fpga_bridge_create(dev, FREEZE_BRIDGE_NAME, + &altera_freeze_br_br_ops, priv); + if (!br) + return -ENOMEM; + + platform_set_drvdata(pdev, br); + + ret = fpga_bridge_register(br); + if (ret) { + fpga_bridge_free(br); + return ret; + } + + return 0; } static int altera_freeze_br_remove(struct platform_device *pdev) { - fpga_bridge_unregister(&pdev->dev); + struct fpga_bridge *br = platform_get_drvdata(pdev); + + fpga_bridge_unregister(br); return 0; } diff --git a/drivers/fpga/altera-hps2fpga.c b/drivers/fpga/altera-hps2fpga.c index 406d2f10741f..a974d3f60321 100644 --- a/drivers/fpga/altera-hps2fpga.c +++ b/drivers/fpga/altera-hps2fpga.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * FPGA to/from HPS Bridge Driver for Altera SoCFPGA Devices * @@ -6,18 +7,6 @@ * Includes this patch from the mailing list: * fpga: altera-hps2fpga: fix HPS2FPGA bridge visibility to L3 masters * Signed-off-by: Anatolij Gustschin <agust@denx.de> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. 
*/ /* @@ -139,6 +128,7 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct altera_hps2fpga_data *priv; const struct of_device_id *of_id; + struct fpga_bridge *br; u32 enable; int ret; @@ -190,11 +180,24 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev) } } - ret = fpga_bridge_register(dev, priv->name, &altera_hps2fpga_br_ops, - priv); -err: + br = fpga_bridge_create(dev, priv->name, &altera_hps2fpga_br_ops, priv); + if (!br) { + ret = -ENOMEM; + goto err; + } + + platform_set_drvdata(pdev, br); + + ret = fpga_bridge_register(br); if (ret) - clk_disable_unprepare(priv->clk); + goto err_free; + + return 0; + +err_free: + fpga_bridge_free(br); +err: + clk_disable_unprepare(priv->clk); return ret; } @@ -204,7 +207,7 @@ static int alt_fpga_bridge_remove(struct platform_device *pdev) struct fpga_bridge *bridge = platform_get_drvdata(pdev); struct altera_hps2fpga_data *priv = bridge->priv; - fpga_bridge_unregister(&pdev->dev); + fpga_bridge_unregister(bridge); clk_disable_unprepare(priv->clk); diff --git a/drivers/fpga/altera-pr-ip-core-plat.c b/drivers/fpga/altera-pr-ip-core-plat.c index 8fb36b8b4648..b293d83143f1 100644 --- a/drivers/fpga/altera-pr-ip-core-plat.c +++ b/drivers/fpga/altera-pr-ip-core-plat.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Driver for Altera Partial Reconfiguration IP Core * @@ -5,18 +6,6 @@ * * Based on socfpga-a10.c Copyright (C) 2015-2016 Altera Corporation * by Alan Tull <atull@opensource.altera.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/fpga/altera-pr-ip-core.h> #include <linux/module.h> diff --git a/drivers/fpga/altera-pr-ip-core.c b/drivers/fpga/altera-pr-ip-core.c index a7b31f9797ce..65e0b6a2c031 100644 --- a/drivers/fpga/altera-pr-ip-core.c +++ b/drivers/fpga/altera-pr-ip-core.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Driver for Altera Partial Reconfiguration IP Core * @@ -5,18 +6,6 @@ * * Based on socfpga-a10.c Copyright (C) 2015-2016 Altera Corporation * by Alan Tull <atull@opensource.altera.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. 
*/ #include <linux/delay.h> #include <linux/fpga/altera-pr-ip-core.h> @@ -187,6 +176,8 @@ static const struct fpga_manager_ops alt_pr_ops = { int alt_pr_register(struct device *dev, void __iomem *reg_base) { struct alt_pr_priv *priv; + struct fpga_manager *mgr; + int ret; u32 val; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); @@ -201,15 +192,27 @@ int alt_pr_register(struct device *dev, void __iomem *reg_base) (val & ALT_PR_CSR_STATUS_MSK) >> ALT_PR_CSR_STATUS_SFT, (int)(val & ALT_PR_CSR_PR_START)); - return fpga_mgr_register(dev, dev_name(dev), &alt_pr_ops, priv); + mgr = fpga_mgr_create(dev, dev_name(dev), &alt_pr_ops, priv); + if (!mgr) + return -ENOMEM; + + dev_set_drvdata(dev, mgr); + + ret = fpga_mgr_register(mgr); + if (ret) + fpga_mgr_free(mgr); + + return ret; } EXPORT_SYMBOL_GPL(alt_pr_register); int alt_pr_unregister(struct device *dev) { + struct fpga_manager *mgr = dev_get_drvdata(dev); + dev_dbg(dev, "%s\n", __func__); - fpga_mgr_unregister(dev); + fpga_mgr_unregister(mgr); return 0; } diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c index 06d212a3d49d..24b25c626036 100644 --- a/drivers/fpga/altera-ps-spi.c +++ b/drivers/fpga/altera-ps-spi.c @@ -238,6 +238,8 @@ static int altera_ps_probe(struct spi_device *spi) { struct altera_ps_conf *conf; const struct of_device_id *of_id; + struct fpga_manager *mgr; + int ret; conf = devm_kzalloc(&spi->dev, sizeof(*conf), GFP_KERNEL); if (!conf) @@ -273,13 +275,25 @@ static int altera_ps_probe(struct spi_device *spi) snprintf(conf->mgr_name, sizeof(conf->mgr_name), "%s %s", dev_driver_string(&spi->dev), dev_name(&spi->dev)); - return fpga_mgr_register(&spi->dev, conf->mgr_name, - &altera_ps_ops, conf); + mgr = fpga_mgr_create(&spi->dev, conf->mgr_name, + &altera_ps_ops, conf); + if (!mgr) + return -ENOMEM; + + spi_set_drvdata(spi, mgr); + + ret = fpga_mgr_register(mgr); + if (ret) + fpga_mgr_free(mgr); + + return ret; } static int altera_ps_remove(struct spi_device *spi) { - fpga_mgr_unregister(&spi->dev); + struct fpga_manager *mgr = spi_get_drvdata(spi); + + fpga_mgr_unregister(mgr); return 0; } diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c index 31bd2c59c305..24b8f98b73ec 100644 --- a/drivers/fpga/fpga-bridge.c +++ b/drivers/fpga/fpga-bridge.c @@ -1,20 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 /* * FPGA Bridge Framework Driver * * Copyright (C) 2013-2016 Altera Corporation, All Rights Reserved. * Copyright (C) 2017 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/fpga/fpga-bridge.h> #include <linux/idr.h> @@ -132,6 +121,7 @@ static int fpga_bridge_dev_match(struct device *dev, const void *data) /** * fpga_bridge_get - get an exclusive reference to a fpga bridge * @dev: parent device that fpga bridge was registered with + * @info: fpga manager info * * Given a device, get an exclusive reference to a fpga bridge. 
* @@ -328,28 +318,29 @@ static struct attribute *fpga_bridge_attrs[] = { ATTRIBUTE_GROUPS(fpga_bridge); /** - * fpga_bridge_register - register a fpga bridge driver + * fpga_bridge_create - create and initialize a struct fpga_bridge * @dev: FPGA bridge device from pdev * @name: FPGA bridge name * @br_ops: pointer to structure of fpga bridge ops * @priv: FPGA bridge private data * - * Return: 0 for success, error code otherwise. + * Return: struct fpga_bridge or NULL */ -int fpga_bridge_register(struct device *dev, const char *name, - const struct fpga_bridge_ops *br_ops, void *priv) +struct fpga_bridge *fpga_bridge_create(struct device *dev, const char *name, + const struct fpga_bridge_ops *br_ops, + void *priv) { struct fpga_bridge *bridge; int id, ret = 0; if (!name || !strlen(name)) { dev_err(dev, "Attempt to register with no name!\n"); - return -EINVAL; + return NULL; } bridge = kzalloc(sizeof(*bridge), GFP_KERNEL); if (!bridge) - return -ENOMEM; + return NULL; id = ida_simple_get(&fpga_bridge_ida, 0, 0, GFP_KERNEL); if (id < 0) { @@ -370,40 +361,62 @@ int fpga_bridge_register(struct device *dev, const char *name, bridge->dev.parent = dev; bridge->dev.of_node = dev->of_node; bridge->dev.id = id; - dev_set_drvdata(dev, bridge); ret = dev_set_name(&bridge->dev, "br%d", id); if (ret) goto error_device; - ret = device_add(&bridge->dev); - if (ret) - goto error_device; - - of_platform_populate(dev->of_node, NULL, NULL, dev); - - dev_info(bridge->dev.parent, "fpga bridge [%s] registered\n", - bridge->name); - - return 0; + return bridge; error_device: ida_simple_remove(&fpga_bridge_ida, id); error_kfree: kfree(bridge); - return ret; + return NULL; } -EXPORT_SYMBOL_GPL(fpga_bridge_register); +EXPORT_SYMBOL_GPL(fpga_bridge_create); /** - * fpga_bridge_unregister - unregister a fpga bridge driver - * @dev: FPGA bridge device from pdev + * fpga_bridge_free - free a fpga bridge and its id + * @bridge: FPGA bridge struct created by fpga_bridge_create */ -void fpga_bridge_unregister(struct device *dev) +void fpga_bridge_free(struct fpga_bridge *bridge) { - struct fpga_bridge *bridge = dev_get_drvdata(dev); + ida_simple_remove(&fpga_bridge_ida, bridge->dev.id); + kfree(bridge); +} +EXPORT_SYMBOL_GPL(fpga_bridge_free); +/** + * fpga_bridge_register - register a fpga bridge + * @bridge: FPGA bridge struct created by fpga_bridge_create + * + * Return: 0 for success, error code otherwise. + */ +int fpga_bridge_register(struct fpga_bridge *bridge) +{ + struct device *dev = &bridge->dev; + int ret; + + ret = device_add(dev); + if (ret) + return ret; + + of_platform_populate(dev->of_node, NULL, NULL, dev); + + dev_info(dev->parent, "fpga bridge [%s] registered\n", bridge->name); + + return 0; +} +EXPORT_SYMBOL_GPL(fpga_bridge_register); + +/** + * fpga_bridge_unregister - unregister and free a fpga bridge + * @bridge: FPGA bridge struct created by fpga_bridge_create + */ +void fpga_bridge_unregister(struct fpga_bridge *bridge) +{ /* * If the low level driver provides a method for putting bridge into * a desired state upon unregister, do it. 
@@ -419,8 +432,7 @@ static void fpga_bridge_dev_release(struct device *dev) { struct fpga_bridge *bridge = to_fpga_bridge(dev); - ida_simple_remove(&fpga_bridge_ida, bridge->dev.id); - kfree(bridge); + fpga_bridge_free(bridge); } static int __init fpga_bridge_dev_init(void) diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c index 9939d2cbc9a6..c1564cf827fe 100644 --- a/drivers/fpga/fpga-mgr.c +++ b/drivers/fpga/fpga-mgr.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * FPGA Manager Core * @@ -6,18 +7,6 @@ * * With code from the mailing list: * Copyright (C) 2013 Xilinx, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/firmware.h> #include <linux/fpga/fpga-mgr.h> @@ -32,6 +21,12 @@ static DEFINE_IDA(fpga_mgr_ida); static struct class *fpga_mgr_class; +/** + * fpga_image_info_alloc - Allocate a FPGA image info struct + * @dev: owning device + * + * Return: struct fpga_image_info or NULL + */ struct fpga_image_info *fpga_image_info_alloc(struct device *dev) { struct fpga_image_info *info; @@ -50,6 +45,10 @@ struct fpga_image_info *fpga_image_info_alloc(struct device *dev) } EXPORT_SYMBOL_GPL(fpga_image_info_alloc); +/** + * fpga_image_info_free - Free a FPGA image info struct + * @info: FPGA image info struct to free + */ void fpga_image_info_free(struct fpga_image_info *info) { struct device *dev; @@ -234,7 +233,7 @@ static int fpga_mgr_buf_load_mapped(struct fpga_manager *mgr, /** * fpga_mgr_buf_load - load fpga from image in buffer * @mgr: fpga manager - * @flags: flags setting fpga confuration modes + * @info: fpga image info * @buf: buffer contain fpga image * @count: byte count of buf * @@ -343,6 +342,16 @@ static int fpga_mgr_firmware_load(struct fpga_manager *mgr, return ret; } +/** + * fpga_mgr_load - load FPGA from scatter/gather table, buffer, or firmware + * @mgr: fpga manager + * @info: fpga image information. + * + * Load the FPGA from an image which is indicated in @info. If successful, the + * FPGA ends up in operating mode. + * + * Return: 0 on success, negative error code otherwise. + */ int fpga_mgr_load(struct fpga_manager *mgr, struct fpga_image_info *info) { if (info->sgt) @@ -429,11 +438,9 @@ static int fpga_mgr_dev_match(struct device *dev, const void *data) } /** - * fpga_mgr_get - get a reference to a fpga mgr + * fpga_mgr_get - Given a device, get a reference to a fpga mgr. * @dev: parent device that fpga mgr was registered with * - * Given a device, get a reference to a fpga mgr. - * * Return: fpga manager struct or IS_ERR() condition containing error code. */ struct fpga_manager *fpga_mgr_get(struct device *dev) @@ -453,10 +460,9 @@ static int fpga_mgr_of_node_match(struct device *dev, const void *data) } /** - * of_fpga_mgr_get - get a reference to a fpga mgr - * @node: device node + * of_fpga_mgr_get - Given a device node, get a reference to a fpga mgr. * - * Given a device node, get a reference to a fpga mgr. 
+ * @node: device node * * Return: fpga manager struct or IS_ERR() condition containing error code. */ @@ -489,7 +495,10 @@ EXPORT_SYMBOL_GPL(fpga_mgr_put); * @mgr: fpga manager * * Given a pointer to FPGA Manager (from fpga_mgr_get() or - * of_fpga_mgr_put()) attempt to get the mutex. + * of_fpga_mgr_put()) attempt to get the mutex. The user should call + * fpga_mgr_lock() and verify that it returns 0 before attempting to + * program the FPGA. Likewise, the user should call fpga_mgr_unlock + * when done programming the FPGA. * * Return: 0 for success or -EBUSY */ @@ -505,7 +514,7 @@ int fpga_mgr_lock(struct fpga_manager *mgr) EXPORT_SYMBOL_GPL(fpga_mgr_lock); /** - * fpga_mgr_unlock - Unlock FPGA manager + * fpga_mgr_unlock - Unlock FPGA manager after done programming * @mgr: fpga manager */ void fpga_mgr_unlock(struct fpga_manager *mgr) @@ -515,17 +524,17 @@ void fpga_mgr_unlock(struct fpga_manager *mgr) EXPORT_SYMBOL_GPL(fpga_mgr_unlock); /** - * fpga_mgr_register - register a low level fpga manager driver + * fpga_mgr_create - create and initialize a FPGA manager struct * @dev: fpga manager device from pdev * @name: fpga manager name * @mops: pointer to structure of fpga manager ops * @priv: fpga manager private data * - * Return: 0 on success, negative error code otherwise. + * Return: pointer to struct fpga_manager or NULL */ -int fpga_mgr_register(struct device *dev, const char *name, - const struct fpga_manager_ops *mops, - void *priv) +struct fpga_manager *fpga_mgr_create(struct device *dev, const char *name, + const struct fpga_manager_ops *mops, + void *priv) { struct fpga_manager *mgr; int id, ret; @@ -534,17 +543,17 @@ int fpga_mgr_register(struct device *dev, const char *name, !mops->write_init || (!mops->write && !mops->write_sg) || (mops->write && mops->write_sg)) { dev_err(dev, "Attempt to register without fpga_manager_ops\n"); - return -EINVAL; + return NULL; } if (!name || !strlen(name)) { dev_err(dev, "Attempt to register with no name!\n"); - return -EINVAL; + return NULL; } mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); if (!mgr) - return -ENOMEM; + return NULL; id = ida_simple_get(&fpga_mgr_ida, 0, 0, GFP_KERNEL); if (id < 0) { @@ -558,25 +567,56 @@ int fpga_mgr_register(struct device *dev, const char *name, mgr->mops = mops; mgr->priv = priv; - /* - * Initialize framework state by requesting low level driver read state - * from device. FPGA may be in reset mode or may have been programmed - * by bootloader or EEPROM. - */ - mgr->state = mgr->mops->state(mgr); - device_initialize(&mgr->dev); mgr->dev.class = fpga_mgr_class; mgr->dev.groups = mops->groups; mgr->dev.parent = dev; mgr->dev.of_node = dev->of_node; mgr->dev.id = id; - dev_set_drvdata(dev, mgr); ret = dev_set_name(&mgr->dev, "fpga%d", id); if (ret) goto error_device; + return mgr; + +error_device: + ida_simple_remove(&fpga_mgr_ida, id); +error_kfree: + kfree(mgr); + + return NULL; +} +EXPORT_SYMBOL_GPL(fpga_mgr_create); + +/** + * fpga_mgr_free - deallocate a FPGA manager + * @mgr: fpga manager struct created by fpga_mgr_create + */ +void fpga_mgr_free(struct fpga_manager *mgr) +{ + ida_simple_remove(&fpga_mgr_ida, mgr->dev.id); + kfree(mgr); +} +EXPORT_SYMBOL_GPL(fpga_mgr_free); + +/** + * fpga_mgr_register - register a FPGA manager + * @mgr: fpga manager struct created by fpga_mgr_create + * + * Return: 0 on success, negative error code otherwise. + */ +int fpga_mgr_register(struct fpga_manager *mgr) +{ + int ret; + + /* + * Initialize framework state by requesting low level driver read state + * from device. 
FPGA may be in reset mode or may have been programmed + * by bootloader or EEPROM. + */ + mgr->state = mgr->mops->state(mgr); + ret = device_add(&mgr->dev); if (ret) goto error_device; @@ -586,22 +626,18 @@ int fpga_mgr_register(struct device *dev, const char *name, return 0; error_device: - ida_simple_remove(&fpga_mgr_ida, id); -error_kfree: - kfree(mgr); + ida_simple_remove(&fpga_mgr_ida, mgr->dev.id); return ret; } EXPORT_SYMBOL_GPL(fpga_mgr_register); /** - * fpga_mgr_unregister - unregister a low level fpga manager driver - * @dev: fpga manager device from pdev + * fpga_mgr_unregister - unregister and free a FPGA manager + * @mgr: fpga manager struct */ -void fpga_mgr_unregister(struct device *dev) +void fpga_mgr_unregister(struct fpga_manager *mgr) { - struct fpga_manager *mgr = dev_get_drvdata(dev); - dev_info(&mgr->dev, "%s %s\n", __func__, mgr->name); /* @@ -619,8 +655,7 @@ static void fpga_mgr_dev_release(struct device *dev) { struct fpga_manager *mgr = to_fpga_manager(dev); - ida_simple_remove(&fpga_mgr_ida, mgr->dev.id); - kfree(mgr); + fpga_mgr_free(mgr); } static int __init fpga_mgr_class_init(void) diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c index edab2a2e03ef..6d214d75c7be 100644 --- a/drivers/fpga/fpga-region.c +++ b/drivers/fpga/fpga-region.c @@ -1,22 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 /* * FPGA Region - Device Tree support for FPGA programming under Linux * * Copyright (C) 2013-2016 Altera Corporation * Copyright (C) 2017 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. */ - #include <linux/fpga/fpga-bridge.h> #include <linux/fpga/fpga-mgr.h> #include <linux/fpga/fpga-region.h> @@ -93,8 +81,16 @@ static void fpga_region_put(struct fpga_region *region) /** * fpga_region_program_fpga - program FPGA + * * @region: FPGA region + * * Program an FPGA using fpga image info (region->info). + * If the region has a get_bridges function, the exclusive reference for the + * bridges will be held if programming succeeds. This is intended to prevent + * reprogramming the region until the caller considers it safe to do so. + * The caller will need to call fpga_bridges_put() before attempting to + * reprogram the region. + * * Return 0 for success or negative error code. 
*/ int fpga_region_program_fpga(struct fpga_region *region) @@ -162,45 +158,86 @@ err_put_region: } EXPORT_SYMBOL_GPL(fpga_region_program_fpga); -int fpga_region_register(struct device *dev, struct fpga_region *region) +/** + * fpga_region_create - alloc and init a struct fpga_region + * @dev: device parent + * @mgr: manager that programs this region + * @get_bridges: optional function to get bridges to a list + * + * Return: struct fpga_region or NULL + */ +struct fpga_region +*fpga_region_create(struct device *dev, + struct fpga_manager *mgr, + int (*get_bridges)(struct fpga_region *)) { + struct fpga_region *region; int id, ret = 0; + region = kzalloc(sizeof(*region), GFP_KERNEL); + if (!region) + return NULL; + id = ida_simple_get(&fpga_region_ida, 0, 0, GFP_KERNEL); if (id < 0) - return id; + goto err_free; + region->mgr = mgr; + region->get_bridges = get_bridges; mutex_init(®ion->mutex); INIT_LIST_HEAD(®ion->bridge_list); + device_initialize(®ion->dev); - region->dev.groups = region->groups; region->dev.class = fpga_region_class; region->dev.parent = dev; region->dev.of_node = dev->of_node; region->dev.id = id; - dev_set_drvdata(dev, region); ret = dev_set_name(®ion->dev, "region%d", id); if (ret) goto err_remove; - ret = device_add(®ion->dev); - if (ret) - goto err_remove; - - return 0; + return region; err_remove: ida_simple_remove(&fpga_region_ida, id); - return ret; +err_free: + kfree(region); + + return NULL; +} +EXPORT_SYMBOL_GPL(fpga_region_create); + +/** + * fpga_region_free - free a struct fpga_region + * @region: FPGA region created by fpga_region_create + */ +void fpga_region_free(struct fpga_region *region) +{ + ida_simple_remove(&fpga_region_ida, region->dev.id); + kfree(region); +} +EXPORT_SYMBOL_GPL(fpga_region_free); + +/** + * fpga_region_register - register a FPGA region + * @region: FPGA region created by fpga_region_create + * Return: 0 or -errno + */ +int fpga_region_register(struct fpga_region *region) +{ + return device_add(®ion->dev); + } EXPORT_SYMBOL_GPL(fpga_region_register); -int fpga_region_unregister(struct fpga_region *region) +/** + * fpga_region_unregister - unregister and free a FPGA region + * @region: FPGA region + */ +void fpga_region_unregister(struct fpga_region *region) { device_unregister(®ion->dev); - - return 0; } EXPORT_SYMBOL_GPL(fpga_region_unregister); @@ -208,7 +245,7 @@ static void fpga_region_dev_release(struct device *dev) { struct fpga_region *region = to_fpga_region(dev); - ida_simple_remove(&fpga_region_ida, region->dev.id); + fpga_region_free(region); } /** diff --git a/drivers/fpga/ice40-spi.c b/drivers/fpga/ice40-spi.c index 7fca82023062..5981c7ee7a7d 100644 --- a/drivers/fpga/ice40-spi.c +++ b/drivers/fpga/ice40-spi.c @@ -133,6 +133,7 @@ static int ice40_fpga_probe(struct spi_device *spi) { struct device *dev = &spi->dev; struct ice40_fpga_priv *priv; + struct fpga_manager *mgr; int ret; priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL); @@ -174,14 +175,26 @@ static int ice40_fpga_probe(struct spi_device *spi) return ret; } - /* Register with the FPGA manager */ - return fpga_mgr_register(dev, "Lattice iCE40 FPGA Manager", - &ice40_fpga_ops, priv); + mgr = fpga_mgr_create(dev, "Lattice iCE40 FPGA Manager", + &ice40_fpga_ops, priv); + if (!mgr) + return -ENOMEM; + + spi_set_drvdata(spi, mgr); + + ret = fpga_mgr_register(mgr); + if (ret) + fpga_mgr_free(mgr); + + return ret; } static int ice40_fpga_remove(struct spi_device *spi) { - fpga_mgr_unregister(&spi->dev); + struct fpga_manager *mgr = spi_get_drvdata(spi); + + 
fpga_mgr_unregister(mgr); + return 0; } diff --git a/drivers/fpga/machxo2-spi.c b/drivers/fpga/machxo2-spi.c new file mode 100644 index 000000000000..a582e0000c97 --- /dev/null +++ b/drivers/fpga/machxo2-spi.c @@ -0,0 +1,415 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Lattice MachXO2 Slave SPI Driver + * + * Manage Lattice FPGA firmware that is loaded over SPI using + * the slave serial configuration interface. + * + * Copyright (C) 2018 Paolo Pisati <p.pisati@gmail.com> + */ + +#include <linux/delay.h> +#include <linux/fpga/fpga-mgr.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/spi/spi.h> + +/* MachXO2 Programming Guide - sysCONFIG Programming Commands */ +#define IDCODE_PUB {0xe0, 0x00, 0x00, 0x00} +#define ISC_ENABLE {0xc6, 0x08, 0x00, 0x00} +#define ISC_ERASE {0x0e, 0x04, 0x00, 0x00} +#define ISC_PROGRAMDONE {0x5e, 0x00, 0x00, 0x00} +#define LSC_INITADDRESS {0x46, 0x00, 0x00, 0x00} +#define LSC_PROGINCRNV {0x70, 0x00, 0x00, 0x01} +#define LSC_READ_STATUS {0x3c, 0x00, 0x00, 0x00} +#define LSC_REFRESH {0x79, 0x00, 0x00, 0x00} + +/* + * Max CCLK in Slave SPI mode according to 'MachXO2 Family Data + * Sheet' sysCONFIG Port Timing Specifications (3-36) + */ +#define MACHXO2_MAX_SPEED 66000000 + +#define MACHXO2_LOW_DELAY_USEC 5 +#define MACHXO2_HIGH_DELAY_USEC 200 +#define MACHXO2_REFRESH_USEC 4800 +#define MACHXO2_MAX_BUSY_LOOP 128 +#define MACHXO2_MAX_REFRESH_LOOP 16 + +#define MACHXO2_PAGE_SIZE 16 +#define MACHXO2_BUF_SIZE (MACHXO2_PAGE_SIZE + 4) + +/* Status register bits, errors and error mask */ +#define BUSY 12 +#define DONE 8 +#define DVER 27 +#define ENAB 9 +#define ERRBITS 23 +#define ERRMASK 7 +#define FAIL 13 + +#define ENOERR 0 /* no error */ +#define EID 1 +#define ECMD 2 +#define ECRC 3 +#define EPREAM 4 /* preamble error */ +#define EABRT 5 /* abort error */ +#define EOVERFL 6 /* overflow error */ +#define ESDMEOF 7 /* SDM EOF */ + +static inline u8 get_err(unsigned long *status) +{ + return (*status >> ERRBITS) & ERRMASK; +} + +static int get_status(struct spi_device *spi, unsigned long *status) +{ + struct spi_message msg; + struct spi_transfer rx, tx; + static const u8 cmd[] = LSC_READ_STATUS; + int ret; + + memset(&rx, 0, sizeof(rx)); + memset(&tx, 0, sizeof(tx)); + tx.tx_buf = cmd; + tx.len = sizeof(cmd); + rx.rx_buf = status; + rx.len = 4; + spi_message_init(&msg); + spi_message_add_tail(&tx, &msg); + spi_message_add_tail(&rx, &msg); + ret = spi_sync(spi, &msg); + if (ret) + return ret; + + *status = be32_to_cpu(*status); + + return 0; +} + +#ifdef DEBUG +static const char *get_err_string(u8 err) +{ + switch (err) { + case ENOERR: return "No Error"; + case EID: return "ID ERR"; + case ECMD: return "CMD ERR"; + case ECRC: return "CRC ERR"; + case EPREAM: return "Preamble ERR"; + case EABRT: return "Abort ERR"; + case EOVERFL: return "Overflow ERR"; + case ESDMEOF: return "SDM EOF"; + } + + return "Default switch case"; +} +#endif + +static void dump_status_reg(unsigned long *status) +{ +#ifdef DEBUG + pr_debug("machxo2 status: 0x%08lX - done=%d, cfgena=%d, busy=%d, fail=%d, devver=%d, err=%s\n", + *status, test_bit(DONE, status), test_bit(ENAB, status), + test_bit(BUSY, status), test_bit(FAIL, status), + test_bit(DVER, status), get_err_string(get_err(status))); +#endif +} + +static int wait_until_not_busy(struct spi_device *spi) +{ + unsigned long status; + int ret, loop = 0; + + do { + ret = get_status(spi, &status); + if (ret) + return ret; + if (++loop >= MACHXO2_MAX_BUSY_LOOP) + return -EBUSY; + } while 
(test_bit(BUSY, &status)); + + return 0; +} + +static int machxo2_cleanup(struct fpga_manager *mgr) +{ + struct spi_device *spi = mgr->priv; + struct spi_message msg; + struct spi_transfer tx[2]; + static const u8 erase[] = ISC_ERASE; + static const u8 refresh[] = LSC_REFRESH; + int ret; + + memset(tx, 0, sizeof(tx)); + spi_message_init(&msg); + tx[0].tx_buf = &erase; + tx[0].len = sizeof(erase); + spi_message_add_tail(&tx[0], &msg); + ret = spi_sync(spi, &msg); + if (ret) + goto fail; + + ret = wait_until_not_busy(spi); + if (ret) + goto fail; + + spi_message_init(&msg); + tx[1].tx_buf = &refresh; + tx[1].len = sizeof(refresh); + tx[1].delay_usecs = MACHXO2_REFRESH_USEC; + spi_message_add_tail(&tx[1], &msg); + ret = spi_sync(spi, &msg); + if (ret) + goto fail; + + return 0; +fail: + dev_err(&mgr->dev, "Cleanup failed\n"); + + return ret; +} + +static enum fpga_mgr_states machxo2_spi_state(struct fpga_manager *mgr) +{ + struct spi_device *spi = mgr->priv; + unsigned long status; + + get_status(spi, &status); + if (!test_bit(BUSY, &status) && test_bit(DONE, &status) && + get_err(&status) == ENOERR) + return FPGA_MGR_STATE_OPERATING; + + return FPGA_MGR_STATE_UNKNOWN; +} + +static int machxo2_write_init(struct fpga_manager *mgr, + struct fpga_image_info *info, + const char *buf, size_t count) +{ + struct spi_device *spi = mgr->priv; + struct spi_message msg; + struct spi_transfer tx[3]; + static const u8 enable[] = ISC_ENABLE; + static const u8 erase[] = ISC_ERASE; + static const u8 initaddr[] = LSC_INITADDRESS; + unsigned long status; + int ret; + + if ((info->flags & FPGA_MGR_PARTIAL_RECONFIG)) { + dev_err(&mgr->dev, + "Partial reconfiguration is not supported\n"); + return -ENOTSUPP; + } + + get_status(spi, &status); + dump_status_reg(&status); + memset(tx, 0, sizeof(tx)); + spi_message_init(&msg); + tx[0].tx_buf = &enable; + tx[0].len = sizeof(enable); + tx[0].delay_usecs = MACHXO2_LOW_DELAY_USEC; + spi_message_add_tail(&tx[0], &msg); + + tx[1].tx_buf = &erase; + tx[1].len = sizeof(erase); + spi_message_add_tail(&tx[1], &msg); + ret = spi_sync(spi, &msg); + if (ret) + goto fail; + + ret = wait_until_not_busy(spi); + if (ret) + goto fail; + + get_status(spi, &status); + if (test_bit(FAIL, &status)) + goto fail; + dump_status_reg(&status); + + spi_message_init(&msg); + tx[2].tx_buf = &initaddr; + tx[2].len = sizeof(initaddr); + spi_message_add_tail(&tx[2], &msg); + ret = spi_sync(spi, &msg); + if (ret) + goto fail; + + get_status(spi, &status); + dump_status_reg(&status); + + return 0; +fail: + dev_err(&mgr->dev, "Error during FPGA init.\n"); + + return ret; +} + +static int machxo2_write(struct fpga_manager *mgr, const char *buf, + size_t count) +{ + struct spi_device *spi = mgr->priv; + struct spi_message msg; + struct spi_transfer tx; + static const u8 progincr[] = LSC_PROGINCRNV; + u8 payload[MACHXO2_BUF_SIZE]; + unsigned long status; + int i, ret; + + if (count % MACHXO2_PAGE_SIZE != 0) { + dev_err(&mgr->dev, "Malformed payload.\n"); + return -EINVAL; + } + get_status(spi, &status); + dump_status_reg(&status); + memcpy(payload, &progincr, sizeof(progincr)); + for (i = 0; i < count; i += MACHXO2_PAGE_SIZE) { + memcpy(&payload[sizeof(progincr)], &buf[i], MACHXO2_PAGE_SIZE); + memset(&tx, 0, sizeof(tx)); + spi_message_init(&msg); + tx.tx_buf = payload; + tx.len = MACHXO2_BUF_SIZE; + tx.delay_usecs = MACHXO2_HIGH_DELAY_USEC; + spi_message_add_tail(&tx, &msg); + ret = spi_sync(spi, &msg); + if (ret) { + dev_err(&mgr->dev, "Error loading the bitstream.\n"); + return ret; + } + } + 
get_status(spi, &status); + dump_status_reg(&status); + + return 0; +} + +static int machxo2_write_complete(struct fpga_manager *mgr, + struct fpga_image_info *info) +{ + struct spi_device *spi = mgr->priv; + struct spi_message msg; + struct spi_transfer tx[2]; + static const u8 progdone[] = ISC_PROGRAMDONE; + static const u8 refresh[] = LSC_REFRESH; + unsigned long status; + int ret, refreshloop = 0; + + memset(tx, 0, sizeof(tx)); + spi_message_init(&msg); + tx[0].tx_buf = &progdone; + tx[0].len = sizeof(progdone); + spi_message_add_tail(&tx[0], &msg); + ret = spi_sync(spi, &msg); + if (ret) + goto fail; + ret = wait_until_not_busy(spi); + if (ret) + goto fail; + + get_status(spi, &status); + dump_status_reg(&status); + if (!test_bit(DONE, &status)) { + machxo2_cleanup(mgr); + goto fail; + } + + do { + spi_message_init(&msg); + tx[1].tx_buf = &refresh; + tx[1].len = sizeof(refresh); + tx[1].delay_usecs = MACHXO2_REFRESH_USEC; + spi_message_add_tail(&tx[1], &msg); + ret = spi_sync(spi, &msg); + if (ret) + goto fail; + + /* check refresh status */ + get_status(spi, &status); + dump_status_reg(&status); + if (!test_bit(BUSY, &status) && test_bit(DONE, &status) && + get_err(&status) == ENOERR) + break; + if (++refreshloop == MACHXO2_MAX_REFRESH_LOOP) { + machxo2_cleanup(mgr); + goto fail; + } + } while (1); + + get_status(spi, &status); + dump_status_reg(&status); + + return 0; +fail: + dev_err(&mgr->dev, "Refresh failed.\n"); + + return ret; +} + +static const struct fpga_manager_ops machxo2_ops = { + .state = machxo2_spi_state, + .write_init = machxo2_write_init, + .write = machxo2_write, + .write_complete = machxo2_write_complete, +}; + +static int machxo2_spi_probe(struct spi_device *spi) +{ + struct device *dev = &spi->dev; + struct fpga_manager *mgr; + int ret; + + if (spi->max_speed_hz > MACHXO2_MAX_SPEED) { + dev_err(dev, "Speed is too high\n"); + return -EINVAL; + } + + mgr = fpga_mgr_create(dev, "Lattice MachXO2 SPI FPGA Manager", + &machxo2_ops, spi); + if (!mgr) + return -ENOMEM; + + spi_set_drvdata(spi, mgr); + + ret = fpga_mgr_register(mgr); + if (ret) + fpga_mgr_free(mgr); + + return ret; +} + +static int machxo2_spi_remove(struct spi_device *spi) +{ + struct fpga_manager *mgr = spi_get_drvdata(spi); + + fpga_mgr_unregister(mgr); + + return 0; +} + +static const struct of_device_id of_match[] = { + { .compatible = "lattice,machxo2-slave-spi", }, + {} +}; +MODULE_DEVICE_TABLE(of, of_match); + +static const struct spi_device_id lattice_ids[] = { + { "machxo2-slave-spi", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(spi, lattice_ids); + +static struct spi_driver machxo2_spi_driver = { + .driver = { + .name = "machxo2-slave-spi", + .of_match_table = of_match_ptr(of_match), + }, + .probe = machxo2_spi_probe, + .remove = machxo2_spi_remove, + .id_table = lattice_ids, +}; + +module_spi_driver(machxo2_spi_driver) + +MODULE_AUTHOR("Paolo Pisati <p.pisati@gmail.com>"); +MODULE_DESCRIPTION("Load Lattice FPGA firmware over SPI"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/fpga/of-fpga-region.c b/drivers/fpga/of-fpga-region.c index 119ff75522f1..35fabb8083fb 100644 --- a/drivers/fpga/of-fpga-region.c +++ b/drivers/fpga/of-fpga-region.c @@ -1,22 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 /* * FPGA Region - Device Tree support for FPGA programming under Linux * * Copyright (C) 2013-2016 Altera Corporation * Copyright (C) 2017 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - 
* version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. */ - #include <linux/fpga/fpga-bridge.h> #include <linux/fpga/fpga-mgr.h> #include <linux/fpga/fpga-region.h> @@ -422,27 +410,25 @@ static int of_fpga_region_probe(struct platform_device *pdev) if (IS_ERR(mgr)) return -EPROBE_DEFER; - region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL); + region = fpga_region_create(dev, mgr, of_fpga_region_get_bridges); if (!region) { ret = -ENOMEM; goto eprobe_mgr_put; } - region->mgr = mgr; - - /* Specify how to get bridges for this type of region. */ - region->get_bridges = of_fpga_region_get_bridges; - - ret = fpga_region_register(dev, region); + ret = fpga_region_register(region); if (ret) - goto eprobe_mgr_put; + goto eprobe_free; of_platform_populate(np, fpga_region_of_match, NULL, ®ion->dev); + dev_set_drvdata(dev, region); dev_info(dev, "FPGA Region probed\n"); return 0; +eprobe_free: + fpga_region_free(region); eprobe_mgr_put: fpga_mgr_put(mgr); return ret; diff --git a/drivers/fpga/socfpga-a10.c b/drivers/fpga/socfpga-a10.c index a46e343a5b72..be30c48eb6e4 100644 --- a/drivers/fpga/socfpga-a10.c +++ b/drivers/fpga/socfpga-a10.c @@ -1,21 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 /* * FPGA Manager Driver for Altera Arria10 SoCFPGA * * Copyright (C) 2015-2016 Altera Corporation - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. 
*/ - #include <linux/clk.h> #include <linux/device.h> #include <linux/delay.h> @@ -482,6 +470,7 @@ static int socfpga_a10_fpga_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct a10_fpga_priv *priv; void __iomem *reg_base; + struct fpga_manager *mgr; struct resource *res; int ret; @@ -519,9 +508,16 @@ static int socfpga_a10_fpga_probe(struct platform_device *pdev) return -EBUSY; } - ret = fpga_mgr_register(dev, "SoCFPGA Arria10 FPGA Manager", - &socfpga_a10_fpga_mgr_ops, priv); + mgr = fpga_mgr_create(dev, "SoCFPGA Arria10 FPGA Manager", + &socfpga_a10_fpga_mgr_ops, priv); + if (!mgr) + return -ENOMEM; + + platform_set_drvdata(pdev, mgr); + + ret = fpga_mgr_register(mgr); if (ret) { + fpga_mgr_free(mgr); clk_disable_unprepare(priv->clk); return ret; } @@ -534,7 +530,7 @@ static int socfpga_a10_fpga_remove(struct platform_device *pdev) struct fpga_manager *mgr = platform_get_drvdata(pdev); struct a10_fpga_priv *priv = mgr->priv; - fpga_mgr_unregister(&pdev->dev); + fpga_mgr_unregister(mgr); clk_disable_unprepare(priv->clk); return 0; diff --git a/drivers/fpga/socfpga.c b/drivers/fpga/socfpga.c index b6672e66cda6..959d71f26896 100644 --- a/drivers/fpga/socfpga.c +++ b/drivers/fpga/socfpga.c @@ -1,19 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 /* * FPGA Manager Driver for Altera SOCFPGA * * Copyright (C) 2013-2015 Altera Corporation - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. 
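Taken together, the FPGA driver conversions in these hunks follow one pattern: probe() now allocates the manager with fpga_mgr_create(), stores it in drvdata, registers it with fpga_mgr_register(), and frees it with fpga_mgr_free() if registration fails; remove() fetches the manager from drvdata and passes it to fpga_mgr_unregister() instead of the parent device. Below is a minimal platform-driver sketch of that pattern, using only the calls that appear in this patch; the example_* names and the NULL priv pointer are placeholders for illustration, not part of the patch.

#include <linux/fpga/fpga-mgr.h>
#include <linux/module.h>
#include <linux/platform_device.h>

/* Placeholder: a real driver fills in state/write_init/write/write_complete. */
static const struct fpga_manager_ops example_ops;

static int example_fpga_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct fpga_manager *mgr;
	int ret;

	/* Allocation and registration are now separate steps. */
	mgr = fpga_mgr_create(dev, "Example FPGA Manager", &example_ops, NULL);
	if (!mgr)
		return -ENOMEM;

	/* Remember the manager so remove() can look it up again. */
	platform_set_drvdata(pdev, mgr);

	ret = fpga_mgr_register(mgr);
	if (ret)
		fpga_mgr_free(mgr);	/* undo fpga_mgr_create() on failure */

	return ret;
}

static int example_fpga_remove(struct platform_device *pdev)
{
	struct fpga_manager *mgr = platform_get_drvdata(pdev);

	/* Unregister now takes the manager itself, not the parent device. */
	fpga_mgr_unregister(mgr);

	return 0;
}

The same create/register/free split is applied to bridges (fpga_bridge_create() in xilinx-pr-decoupler) and regions (fpga_region_create() in of-fpga-region) within this patch.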
*/ #include <linux/completion.h> #include <linux/delay.h> @@ -555,6 +544,7 @@ static int socfpga_fpga_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct socfpga_fpga_priv *priv; + struct fpga_manager *mgr; struct resource *res; int ret; @@ -581,13 +571,25 @@ static int socfpga_fpga_probe(struct platform_device *pdev) if (ret) return ret; - return fpga_mgr_register(dev, "Altera SOCFPGA FPGA Manager", - &socfpga_fpga_ops, priv); + mgr = fpga_mgr_create(dev, "Altera SOCFPGA FPGA Manager", + &socfpga_fpga_ops, priv); + if (!mgr) + return -ENOMEM; + + platform_set_drvdata(pdev, mgr); + + ret = fpga_mgr_register(mgr); + if (ret) + fpga_mgr_free(mgr); + + return ret; } static int socfpga_fpga_remove(struct platform_device *pdev) { - fpga_mgr_unregister(&pdev->dev); + struct fpga_manager *mgr = platform_get_drvdata(pdev); + + fpga_mgr_unregister(mgr); return 0; } diff --git a/drivers/fpga/ts73xx-fpga.c b/drivers/fpga/ts73xx-fpga.c index f6a96b42e2ca..08efd1895b1b 100644 --- a/drivers/fpga/ts73xx-fpga.c +++ b/drivers/fpga/ts73xx-fpga.c @@ -116,7 +116,9 @@ static int ts73xx_fpga_probe(struct platform_device *pdev) { struct device *kdev = &pdev->dev; struct ts73xx_fpga_priv *priv; + struct fpga_manager *mgr; struct resource *res; + int ret; priv = devm_kzalloc(kdev, sizeof(*priv), GFP_KERNEL); if (!priv) @@ -131,13 +133,25 @@ static int ts73xx_fpga_probe(struct platform_device *pdev) return PTR_ERR(priv->io_base); } - return fpga_mgr_register(kdev, "TS-73xx FPGA Manager", - &ts73xx_fpga_ops, priv); + mgr = fpga_mgr_create(kdev, "TS-73xx FPGA Manager", + &ts73xx_fpga_ops, priv); + if (!mgr) + return -ENOMEM; + + platform_set_drvdata(pdev, mgr); + + ret = fpga_mgr_register(mgr); + if (ret) + fpga_mgr_free(mgr); + + return ret; } static int ts73xx_fpga_remove(struct platform_device *pdev) { - fpga_mgr_unregister(&pdev->dev); + struct fpga_manager *mgr = platform_get_drvdata(pdev); + + fpga_mgr_unregister(mgr); return 0; } diff --git a/drivers/fpga/xilinx-pr-decoupler.c b/drivers/fpga/xilinx-pr-decoupler.c index 0d7743089414..07ba1539e82c 100644 --- a/drivers/fpga/xilinx-pr-decoupler.c +++ b/drivers/fpga/xilinx-pr-decoupler.c @@ -94,6 +94,7 @@ MODULE_DEVICE_TABLE(of, xlnx_pr_decoupler_of_match); static int xlnx_pr_decoupler_probe(struct platform_device *pdev) { struct xlnx_pr_decoupler_data *priv; + struct fpga_bridge *br; int err; struct resource *res; @@ -120,16 +121,27 @@ static int xlnx_pr_decoupler_probe(struct platform_device *pdev) clk_disable(priv->clk); - err = fpga_bridge_register(&pdev->dev, "Xilinx PR Decoupler", - &xlnx_pr_decoupler_br_ops, priv); + br = fpga_bridge_create(&pdev->dev, "Xilinx PR Decoupler", + &xlnx_pr_decoupler_br_ops, priv); + if (!br) { + err = -ENOMEM; + goto err_clk; + } + + platform_set_drvdata(pdev, br); + err = fpga_bridge_register(br); if (err) { dev_err(&pdev->dev, "unable to register Xilinx PR Decoupler"); - clk_unprepare(priv->clk); - return err; + goto err_clk; } return 0; + +err_clk: + clk_unprepare(priv->clk); + + return err; } static int xlnx_pr_decoupler_remove(struct platform_device *pdev) @@ -137,7 +149,7 @@ static int xlnx_pr_decoupler_remove(struct platform_device *pdev) struct fpga_bridge *bridge = platform_get_drvdata(pdev); struct xlnx_pr_decoupler_data *p = bridge->priv; - fpga_bridge_unregister(&pdev->dev); + fpga_bridge_unregister(bridge); clk_unprepare(p->clk); diff --git a/drivers/fpga/xilinx-spi.c b/drivers/fpga/xilinx-spi.c index 9b62a4c2a3df..8d1945966533 100644 --- a/drivers/fpga/xilinx-spi.c +++ 
b/drivers/fpga/xilinx-spi.c @@ -143,6 +143,8 @@ static const struct fpga_manager_ops xilinx_spi_ops = { static int xilinx_spi_probe(struct spi_device *spi) { struct xilinx_spi_conf *conf; + struct fpga_manager *mgr; + int ret; conf = devm_kzalloc(&spi->dev, sizeof(*conf), GFP_KERNEL); if (!conf) @@ -165,13 +167,25 @@ static int xilinx_spi_probe(struct spi_device *spi) return PTR_ERR(conf->done); } - return fpga_mgr_register(&spi->dev, "Xilinx Slave Serial FPGA Manager", - &xilinx_spi_ops, conf); + mgr = fpga_mgr_create(&spi->dev, "Xilinx Slave Serial FPGA Manager", + &xilinx_spi_ops, conf); + if (!mgr) + return -ENOMEM; + + spi_set_drvdata(spi, mgr); + + ret = fpga_mgr_register(mgr); + if (ret) + fpga_mgr_free(mgr); + + return ret; } static int xilinx_spi_remove(struct spi_device *spi) { - fpga_mgr_unregister(&spi->dev); + struct fpga_manager *mgr = spi_get_drvdata(spi); + + fpga_mgr_unregister(mgr); return 0; } diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c index 70b15b303471..3110e00121ca 100644 --- a/drivers/fpga/zynq-fpga.c +++ b/drivers/fpga/zynq-fpga.c @@ -558,6 +558,7 @@ static int zynq_fpga_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct zynq_fpga_priv *priv; + struct fpga_manager *mgr; struct resource *res; int err; @@ -613,10 +614,17 @@ static int zynq_fpga_probe(struct platform_device *pdev) clk_disable(priv->clk); - err = fpga_mgr_register(dev, "Xilinx Zynq FPGA Manager", - &zynq_fpga_ops, priv); + mgr = fpga_mgr_create(dev, "Xilinx Zynq FPGA Manager", + &zynq_fpga_ops, priv); + if (!mgr) + return -ENOMEM; + + platform_set_drvdata(pdev, mgr); + + err = fpga_mgr_register(mgr); if (err) { dev_err(dev, "unable to register FPGA manager\n"); + fpga_mgr_free(mgr); clk_unprepare(priv->clk); return err; } @@ -632,7 +640,7 @@ static int zynq_fpga_remove(struct platform_device *pdev) mgr = platform_get_drvdata(pdev); priv = mgr->priv; - fpga_mgr_unregister(&pdev->dev); + fpga_mgr_unregister(mgr); clk_unprepare(priv->clk); diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c index 72855182b191..ced041899456 100644 --- a/drivers/hv/connection.c +++ b/drivers/hv/connection.c @@ -63,6 +63,9 @@ static __u32 vmbus_get_next_version(__u32 current_version) case (VERSION_WIN10): return VERSION_WIN8_1; + case (VERSION_WIN10_V5): + return VERSION_WIN10; + case (VERSION_WS2008): default: return VERSION_INVAL; @@ -80,9 +83,29 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, msg = (struct vmbus_channel_initiate_contact *)msginfo->msg; + memset(msg, 0, sizeof(*msg)); msg->header.msgtype = CHANNELMSG_INITIATE_CONTACT; msg->vmbus_version_requested = version; - msg->interrupt_page = virt_to_phys(vmbus_connection.int_page); + + /* + * VMBus protocol 5.0 (VERSION_WIN10_V5) requires that we must use + * VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message, + * and for subsequent messages, we must use the Message Connection ID + * field in the host-returned Version Response Message. And, with + * VERSION_WIN10_V5, we don't use msg->interrupt_page, but we tell + * the host explicitly that we still use VMBUS_MESSAGE_SINT(2) for + * compatibility. + * + * On old hosts, we should always use VMBUS_MESSAGE_CONNECTION_ID (1). 
+ */ + if (version >= VERSION_WIN10_V5) { + msg->msg_sint = VMBUS_MESSAGE_SINT; + vmbus_connection.msg_conn_id = VMBUS_MESSAGE_CONNECTION_ID_4; + } else { + msg->interrupt_page = virt_to_phys(vmbus_connection.int_page); + vmbus_connection.msg_conn_id = VMBUS_MESSAGE_CONNECTION_ID; + } + msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]); msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]); /* @@ -137,6 +160,10 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, /* Check if successful */ if (msginfo->response.version_response.version_supported) { vmbus_connection.conn_state = CONNECTED; + + if (version >= VERSION_WIN10_V5) + vmbus_connection.msg_conn_id = + msginfo->response.version_response.msg_conn_id; } else { return -ECONNREFUSED; } @@ -354,13 +381,14 @@ void vmbus_on_event(unsigned long data) */ int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep) { + struct vmbus_channel_message_header *hdr; union hv_connection_id conn_id; int ret = 0; int retries = 0; u32 usec = 1; conn_id.asu32 = 0; - conn_id.u.id = VMBUS_MESSAGE_CONNECTION_ID; + conn_id.u.id = vmbus_connection.msg_conn_id; /* * hv_post_message() can have transient failures because of @@ -373,6 +401,18 @@ int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep) switch (ret) { case HV_STATUS_INVALID_CONNECTION_ID: /* + * See vmbus_negotiate_version(): VMBus protocol 5.0 + * requires that we must use + * VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate + * Contact message, but on old hosts that only + * support VMBus protocol 4.0 or lower, here we get + * HV_STATUS_INVALID_CONNECTION_ID and we should + * return an error immediately without retrying. + */ + hdr = buffer; + if (hdr->msgtype == CHANNELMSG_INITIATE_CONTACT) + return -EINVAL; + /* * We could get this if we send messages too * frequently. */ diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h index f761bef36e77..72eaba3d50fc 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h @@ -187,6 +187,7 @@ struct hv_input_post_message { enum { VMBUS_MESSAGE_CONNECTION_ID = 1, + VMBUS_MESSAGE_CONNECTION_ID_4 = 4, VMBUS_MESSAGE_PORT_ID = 1, VMBUS_EVENT_CONNECTION_ID = 2, VMBUS_EVENT_PORT_ID = 2, @@ -302,6 +303,8 @@ struct vmbus_connection { */ int connect_cpu; + u32 msg_conn_id; + atomic_t offer_in_progress; enum vmbus_connect_state conn_state; diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c index 9cdb3fbc8c1f..45b2460f3166 100644 --- a/drivers/hwtracing/coresight/coresight-cpu-debug.c +++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c @@ -1,20 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2017 Linaro Limited. All rights reserved. * * Author: Leo Yan <leo.yan@linaro.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. 
- * */ #include <linux/amba/bus.h> #include <linux/coresight.h> @@ -315,7 +303,7 @@ static void debug_dump_regs(struct debug_drvdata *drvdata) } pc = debug_adjust_pc(drvdata); - dev_emerg(dev, " EDPCSR: [<%px>] %pS\n", (void *)pc, (void *)pc); + dev_emerg(dev, " EDPCSR: %pS\n", (void *)pc); if (drvdata->edcidsr_present) dev_emerg(dev, " EDCIDSR: %08x\n", drvdata->edcidsr); diff --git a/drivers/hwtracing/coresight/coresight-dynamic-replicator.c b/drivers/hwtracing/coresight/coresight-dynamic-replicator.c index 043da86b0fe9..f6d0571ab9dd 100644 --- a/drivers/hwtracing/coresight/coresight-dynamic-replicator.c +++ b/drivers/hwtracing/coresight/coresight-dynamic-replicator.c @@ -1,14 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include <linux/amba/bus.h> diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c index 580cd381adf3..9b6c55523c58 100644 --- a/drivers/hwtracing/coresight/coresight-etb10.c +++ b/drivers/hwtracing/coresight/coresight-etb10.c @@ -1,15 +1,8 @@ -/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * * Description: CoreSight Embedded Trace Buffer driver - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include <asm/local.h> diff --git a/drivers/hwtracing/coresight/coresight-etm-cp14.c b/drivers/hwtracing/coresight/coresight-etm-cp14.c index 12a220682117..4174a8d355d2 100644 --- a/drivers/hwtracing/coresight/coresight-etm-cp14.c +++ b/drivers/hwtracing/coresight/coresight-etm-cp14.c @@ -1,13 +1,6 @@ -/* Copyright (c) 2012, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2012, The Linux Foundation. All rights reserved. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c index 4e5ed6597f2f..677695635211 100644 --- a/drivers/hwtracing/coresight/coresight-etm-perf.c +++ b/drivers/hwtracing/coresight/coresight-etm-perf.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright(C) 2015 Linaro Limited. All rights reserved. * Author: Mathieu Poirier <mathieu.poirier@linaro.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/coresight.h> diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.h b/drivers/hwtracing/coresight/coresight-etm-perf.h index 3ffc9feb2d64..4197df4faf5e 100644 --- a/drivers/hwtracing/coresight/coresight-etm-perf.h +++ b/drivers/hwtracing/coresight/coresight-etm-perf.h @@ -1,18 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright(C) 2015 Linaro Limited. All rights reserved. * Author: Mathieu Poirier <mathieu.poirier@linaro.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. */ #ifndef _CORESIGHT_ETM_PERF_H diff --git a/drivers/hwtracing/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h index 70b0a248c321..e8b4549e30e2 100644 --- a/drivers/hwtracing/coresight/coresight-etm.h +++ b/drivers/hwtracing/coresight/coresight-etm.h @@ -1,13 +1,6 @@ -/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. */ #ifndef _CORESIGHT_CORESIGHT_ETM_H diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c index 6e547ec6fead..9435c1481f61 100644 --- a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c +++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright(C) 2015 Linaro Limited. All rights reserved. 
* Author: Mathieu Poirier <mathieu.poirier@linaro.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/pm_runtime.h> diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c index 39f42fdd503d..15ed64d51a5b 100644 --- a/drivers/hwtracing/coresight/coresight-etm3x.c +++ b/drivers/hwtracing/coresight/coresight-etm3x.c @@ -1,15 +1,8 @@ -/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * * Description: CoreSight Program Flow Trace driver - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include <linux/kernel.h> diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c index d21961710713..4eb8da785ce0 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright(C) 2015 Linaro Limited. All rights reserved. * Author: Mathieu Poirier <mathieu.poirier@linaro.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/pm_runtime.h> diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c index cf364a514c12..9bc04c50d45b 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.c +++ b/drivers/hwtracing/coresight/coresight-etm4x.c @@ -1,13 +1,6 @@ -/* Copyright (c) 2014, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
+// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. */ #include <linux/kernel.h> diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h index b3b5ea7b7fb3..b7c4a6f6c6b9 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.h +++ b/drivers/hwtracing/coresight/coresight-etm4x.h @@ -1,13 +1,6 @@ -/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. */ #ifndef _CORESIGHT_CORESIGHT_ETM_H diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c index 9f8ac0bef853..448145a36675 100644 --- a/drivers/hwtracing/coresight/coresight-funnel.c +++ b/drivers/hwtracing/coresight/coresight-funnel.c @@ -1,15 +1,8 @@ -/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * * Description: CoreSight Funnel driver - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include <linux/kernel.h> diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h index f1d0e21d8cab..0e5a74dae6a6 100644 --- a/drivers/hwtracing/coresight/coresight-priv.h +++ b/drivers/hwtracing/coresight/coresight-priv.h @@ -1,13 +1,6 @@ -/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. */ #ifndef _CORESIGHT_PRIV_H diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c index 3756e71cb8f5..8d2eaaab6c2f 100644 --- a/drivers/hwtracing/coresight/coresight-replicator.c +++ b/drivers/hwtracing/coresight/coresight-replicator.c @@ -1,15 +1,8 @@ -/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. 
* * Description: CoreSight Replicator driver - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include <linux/kernel.h> diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c index 15e7ef3891f5..c46c70aec1d5 100644 --- a/drivers/hwtracing/coresight/coresight-stm.c +++ b/drivers/hwtracing/coresight/coresight-stm.c @@ -1,16 +1,9 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. * * Description: CoreSight System Trace Macrocell driver * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * Initial implementation by Pratik Patel * (C) 2014-2015 Pratik Patel <pratikp@codeaurora.org> * diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c index e2513b786242..61d849b11c26 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etf.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright(C) 2016 Linaro Limited. All rights reserved. * Author: Mathieu Poirier <mathieu.poirier@linaro.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/circ_buf.h> diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c index 68fbc8f7450e..02f747afa2ba 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etr.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright(C) 2016 Linaro Limited. All rights reserved. * Author: Mathieu Poirier <mathieu.poirier@linaro.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/coresight.h> @@ -124,10 +113,9 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev) bool used = false; unsigned long flags; void __iomem *vaddr = NULL; - dma_addr_t paddr; + dma_addr_t paddr = 0; struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); - /* * If we don't have a buffer release the lock and allocate memory. * Otherwise keep the lock and move along. @@ -164,11 +152,11 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev) goto out; /* - * If drvdata::buf == NULL, use the memory allocated above. + * If drvdata::vaddr == NULL, use the memory allocated above. * Otherwise a buffer still exists from a previous session, so * simply use that. */ - if (drvdata->buf == NULL) { + if (drvdata->vaddr == NULL) { used = true; drvdata->vaddr = vaddr; drvdata->paddr = paddr; diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c index 0ea04f588de0..456f122df74f 100644 --- a/drivers/hwtracing/coresight/coresight-tmc.c +++ b/drivers/hwtracing/coresight/coresight-tmc.c @@ -1,15 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012, The Linux Foundation. All rights reserved. * * Description: CoreSight Trace Memory Controller driver - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include <linux/kernel.h> diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h index 8df7a813f537..dfaff077a7fc 100644 --- a/drivers/hwtracing/coresight/coresight-tmc.h +++ b/drivers/hwtracing/coresight/coresight-tmc.h @@ -1,18 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright(C) 2015 Linaro Limited. All rights reserved. * Author: Mathieu Poirier <mathieu.poirier@linaro.org> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. */ #ifndef _CORESIGHT_TMC_H diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c index 805f7c2210fe..01b7457fe8fc 100644 --- a/drivers/hwtracing/coresight/coresight-tpiu.c +++ b/drivers/hwtracing/coresight/coresight-tpiu.c @@ -1,15 +1,8 @@ -/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. 
* * Description: CoreSight Trace Port Interface Unit driver - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include <linux/kernel.h> diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c index 389c4baeca9d..29e834aab539 100644 --- a/drivers/hwtracing/coresight/coresight.c +++ b/drivers/hwtracing/coresight/coresight.c @@ -1,13 +1,6 @@ -/* Copyright (c) 2012, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2012, The Linux Foundation. All rights reserved. */ #include <linux/kernel.h> @@ -1026,8 +1019,10 @@ struct coresight_device *coresight_register(struct coresight_desc *desc) dev_set_name(&csdev->dev, "%s", desc->pdata->name); ret = device_register(&csdev->dev); - if (ret) - goto err_device_register; + if (ret) { + put_device(&csdev->dev); + goto err_kzalloc_csdev; + } mutex_lock(&coresight_mutex); @@ -1038,8 +1033,6 @@ struct coresight_device *coresight_register(struct coresight_desc *desc) return csdev; -err_device_register: - kfree(conns); err_kzalloc_conns: kfree(refcnts); err_kzalloc_refcnts: diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c index 7c375443ede6..a33a92ebe74b 100644 --- a/drivers/hwtracing/coresight/of_coresight.c +++ b/drivers/hwtracing/coresight/of_coresight.c @@ -1,13 +1,6 @@ -/* Copyright (c) 2012, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2012, The Linux Foundation. All rights reserved. */ #include <linux/types.h> diff --git a/drivers/hwtracing/stm/ftrace.c b/drivers/hwtracing/stm/ftrace.c index 7da75644c750..ce868e095410 100644 --- a/drivers/hwtracing/stm/ftrace.c +++ b/drivers/hwtracing/stm/ftrace.c @@ -1,16 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Simple kernel driver to link kernel Ftrace and an STM device * Copyright (c) 2016, Linaro Ltd. * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * * STM Ftrace will be registered as a trace_export. */ diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 5d713008749b..3726eacdf65d 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -113,6 +113,20 @@ config IBM_ASM for information on the specific driver level and support statement for your IBM server. +config IBMVMC + tristate "IBM Virtual Management Channel support" + depends on PPC_PSERIES + help + This is the IBM POWER Virtual Management Channel + + This driver is to be used for the POWER Virtual + Management Channel virtual adapter on the PowerVM + platform. It provides both request/response and + async message support through the /dev/ibmvmc node. + + To compile this driver as a module, choose M here: the + module will be called ibmvmc. + config PHANTOM tristate "Sensable PHANToM (PCI)" depends on PCI diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 20be70c3f118..af22bbc3d00c 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -4,6 +4,7 @@ # obj-$(CONFIG_IBM_ASM) += ibmasm/ +obj-$(CONFIG_IBMVMC) += ibmvmc.o obj-$(CONFIG_AD525X_DPOT) += ad525x_dpot.o obj-$(CONFIG_AD525X_DPOT_I2C) += ad525x_dpot-i2c.o obj-$(CONFIG_AD525X_DPOT_SPI) += ad525x_dpot-spi.o diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c index 7ff315ad3692..c6ec872800a2 100644 --- a/drivers/misc/cxl/context.c +++ b/drivers/misc/cxl/context.c @@ -128,11 +128,12 @@ void cxl_context_set_mapping(struct cxl_context *ctx, mutex_unlock(&ctx->mapping_lock); } -static int cxl_mmap_fault(struct vm_fault *vmf) +static vm_fault_t cxl_mmap_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct cxl_context *ctx = vma->vm_file->private_data; u64 area, offset; + vm_fault_t ret; offset = vmf->pgoff << PAGE_SHIFT; @@ -169,11 +170,11 @@ static int cxl_mmap_fault(struct vm_fault *vmf) return VM_FAULT_SIGBUS; } - vm_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT); + ret = vmf_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT); mutex_unlock(&ctx->status_mutex); - return VM_FAULT_NOPAGE; + return ret; } static const struct vm_operations_struct cxl_mmap_vmops = { diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c new file mode 100644 index 000000000000..fb83d1375638 --- /dev/null +++ b/drivers/misc/ibmvmc.c @@ -0,0 +1,2418 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * IBM Power Systems Virtual Management Channel Support. + * + * Copyright (c) 2004, 2018 IBM Corp. + * Dave Engebretsen engebret@us.ibm.com + * Steven Royer seroyer@linux.vnet.ibm.com + * Adam Reznechek adreznec@linux.vnet.ibm.com + * Bryant G. 
Ly <bryantly@linux.vnet.ibm.com> + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/kthread.h> +#include <linux/major.h> +#include <linux/string.h> +#include <linux/fcntl.h> +#include <linux/slab.h> +#include <linux/poll.h> +#include <linux/init.h> +#include <linux/fs.h> +#include <linux/interrupt.h> +#include <linux/spinlock.h> +#include <linux/percpu.h> +#include <linux/delay.h> +#include <linux/uaccess.h> +#include <linux/io.h> +#include <linux/miscdevice.h> +#include <linux/sched/signal.h> + +#include <asm/byteorder.h> +#include <asm/irq.h> +#include <asm/vio.h> + +#include "ibmvmc.h" + +#define IBMVMC_DRIVER_VERSION "1.0" + +/* + * Static global variables + */ +static DECLARE_WAIT_QUEUE_HEAD(ibmvmc_read_wait); + +static const char ibmvmc_driver_name[] = "ibmvmc"; + +static struct ibmvmc_struct ibmvmc; +static struct ibmvmc_hmc hmcs[MAX_HMCS]; +static struct crq_server_adapter ibmvmc_adapter; + +static int ibmvmc_max_buf_pool_size = DEFAULT_BUF_POOL_SIZE; +static int ibmvmc_max_hmcs = DEFAULT_HMCS; +static int ibmvmc_max_mtu = DEFAULT_MTU; + +static inline long h_copy_rdma(s64 length, u64 sliobn, u64 slioba, + u64 dliobn, u64 dlioba) +{ + long rc = 0; + + /* Ensure all writes to source memory are visible before hcall */ + dma_wmb(); + pr_debug("ibmvmc: h_copy_rdma(0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n", + length, sliobn, slioba, dliobn, dlioba); + rc = plpar_hcall_norets(H_COPY_RDMA, length, sliobn, slioba, + dliobn, dlioba); + pr_debug("ibmvmc: h_copy_rdma rc = 0x%lx\n", rc); + + return rc; +} + +static inline void h_free_crq(uint32_t unit_address) +{ + long rc = 0; + + do { + if (H_IS_LONG_BUSY(rc)) + msleep(get_longbusy_msecs(rc)); + + rc = plpar_hcall_norets(H_FREE_CRQ, unit_address); + } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc))); +} + +/** + * h_request_vmc: - request a hypervisor virtual management channel device + * @vmc_index: drc index of the vmc device created + * + * Requests the hypervisor create a new virtual management channel device, + * allowing this partition to send hypervisor virtualization control + * commands. 
+ * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static inline long h_request_vmc(u32 *vmc_index) +{ + long rc = 0; + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + + do { + if (H_IS_LONG_BUSY(rc)) + msleep(get_longbusy_msecs(rc)); + + /* Call to request the VMC device from phyp */ + rc = plpar_hcall(H_REQUEST_VMC, retbuf); + pr_debug("ibmvmc: %s rc = 0x%lx\n", __func__, rc); + *vmc_index = retbuf[0]; + } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc))); + + return rc; +} + +/* routines for managing a command/response queue */ +/** + * ibmvmc_handle_event: - Interrupt handler for crq events + * @irq: number of irq to handle, not used + * @dev_instance: crq_server_adapter that received interrupt + * + * Disables interrupts and schedules ibmvmc_task + * + * Always returns IRQ_HANDLED + */ +static irqreturn_t ibmvmc_handle_event(int irq, void *dev_instance) +{ + struct crq_server_adapter *adapter = + (struct crq_server_adapter *)dev_instance; + + vio_disable_interrupts(to_vio_dev(adapter->dev)); + tasklet_schedule(&adapter->work_task); + + return IRQ_HANDLED; +} + +/** + * ibmvmc_release_crq_queue - Release CRQ Queue + * + * @adapter: crq_server_adapter struct + * + * Return: + * 0 - Success + * Non-Zero - Failure + */ +static void ibmvmc_release_crq_queue(struct crq_server_adapter *adapter) +{ + struct vio_dev *vdev = to_vio_dev(adapter->dev); + struct crq_queue *queue = &adapter->queue; + + free_irq(vdev->irq, (void *)adapter); + tasklet_kill(&adapter->work_task); + + if (adapter->reset_task) + kthread_stop(adapter->reset_task); + + h_free_crq(vdev->unit_address); + dma_unmap_single(adapter->dev, + queue->msg_token, + queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL); + free_page((unsigned long)queue->msgs); +} + +/** + * ibmvmc_reset_crq_queue - Reset CRQ Queue + * + * @adapter: crq_server_adapter struct + * + * This function calls h_free_crq and then calls H_REG_CRQ and does all the + * bookkeeping to get us back to where we can communicate. + * + * Return: + * 0 - Success + * Non-Zero - Failure + */ +static int ibmvmc_reset_crq_queue(struct crq_server_adapter *adapter) +{ + struct vio_dev *vdev = to_vio_dev(adapter->dev); + struct crq_queue *queue = &adapter->queue; + int rc = 0; + + /* Close the CRQ */ + h_free_crq(vdev->unit_address); + + /* Clean out the queue */ + memset(queue->msgs, 0x00, PAGE_SIZE); + queue->cur = 0; + + /* And re-open it again */ + rc = plpar_hcall_norets(H_REG_CRQ, + vdev->unit_address, + queue->msg_token, PAGE_SIZE); + if (rc == 2) + /* Adapter is good, but other end is not ready */ + dev_warn(adapter->dev, "Partner adapter not ready\n"); + else if (rc != 0) + dev_err(adapter->dev, "couldn't register crq--rc 0x%x\n", rc); + + return rc; +} + +/** + * crq_queue_next_crq: - Returns the next entry in message queue + * @queue: crq_queue to use + * + * Returns pointer to next entry in queue, or NULL if there are no new + * entried in the CRQ. 
+ */ +static struct ibmvmc_crq_msg *crq_queue_next_crq(struct crq_queue *queue) +{ + struct ibmvmc_crq_msg *crq; + unsigned long flags; + + spin_lock_irqsave(&queue->lock, flags); + crq = &queue->msgs[queue->cur]; + if (crq->valid & 0x80) { + if (++queue->cur == queue->size) + queue->cur = 0; + + /* Ensure the read of the valid bit occurs before reading any + * other bits of the CRQ entry + */ + dma_rmb(); + } else { + crq = NULL; + } + + spin_unlock_irqrestore(&queue->lock, flags); + + return crq; +} + +/** + * ibmvmc_send_crq - Send CRQ + * + * @adapter: crq_server_adapter struct + * @word1: Word1 Data field + * @word2: Word2 Data field + * + * Return: + * 0 - Success + * Non-Zero - Failure + */ +static long ibmvmc_send_crq(struct crq_server_adapter *adapter, + u64 word1, u64 word2) +{ + struct vio_dev *vdev = to_vio_dev(adapter->dev); + long rc = 0; + + dev_dbg(adapter->dev, "(0x%x, 0x%016llx, 0x%016llx)\n", + vdev->unit_address, word1, word2); + + /* + * Ensure the command buffer is flushed to memory before handing it + * over to the other side to prevent it from fetching any stale data. + */ + dma_wmb(); + rc = plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2); + dev_dbg(adapter->dev, "rc = 0x%lx\n", rc); + + return rc; +} + +/** + * alloc_dma_buffer - Create DMA Buffer + * + * @vdev: vio_dev struct + * @size: Size field + * @dma_handle: DMA address field + * + * Allocates memory for the command queue and maps remote memory into an + * ioba. + * + * Returns a pointer to the buffer + */ +static void *alloc_dma_buffer(struct vio_dev *vdev, size_t size, + dma_addr_t *dma_handle) +{ + /* allocate memory */ + void *buffer = kzalloc(size, GFP_KERNEL); + + if (!buffer) { + *dma_handle = 0; + return NULL; + } + + /* DMA map */ + *dma_handle = dma_map_single(&vdev->dev, buffer, size, + DMA_BIDIRECTIONAL); + + if (dma_mapping_error(&vdev->dev, *dma_handle)) { + *dma_handle = 0; + kzfree(buffer); + return NULL; + } + + return buffer; +} + +/** + * free_dma_buffer - Free DMA Buffer + * + * @vdev: vio_dev struct + * @size: Size field + * @vaddr: Address field + * @dma_handle: DMA address field + * + * Releases memory for a command queue and unmaps mapped remote memory. 
+ */ +static void free_dma_buffer(struct vio_dev *vdev, size_t size, void *vaddr, + dma_addr_t dma_handle) +{ + /* DMA unmap */ + dma_unmap_single(&vdev->dev, dma_handle, size, DMA_BIDIRECTIONAL); + + /* deallocate memory */ + kzfree(vaddr); +} + +/** + * ibmvmc_get_valid_hmc_buffer - Retrieve Valid HMC Buffer + * + * @hmc_index: HMC Index Field + * + * Return: + * Pointer to ibmvmc_buffer + */ +static struct ibmvmc_buffer *ibmvmc_get_valid_hmc_buffer(u8 hmc_index) +{ + struct ibmvmc_buffer *buffer; + struct ibmvmc_buffer *ret_buf = NULL; + unsigned long i; + + if (hmc_index > ibmvmc.max_hmc_index) + return NULL; + + buffer = hmcs[hmc_index].buffer; + + for (i = 0; i < ibmvmc_max_buf_pool_size; i++) { + if (buffer[i].valid && buffer[i].free && + buffer[i].owner == VMC_BUF_OWNER_ALPHA) { + buffer[i].free = 0; + ret_buf = &buffer[i]; + break; + } + } + + return ret_buf; +} + +/** + * ibmvmc_get_free_hmc_buffer - Get Free HMC Buffer + * + * @adapter: crq_server_adapter struct + * @hmc_index: Hmc Index field + * + * Return: + * Pointer to ibmvmc_buffer + */ +static struct ibmvmc_buffer *ibmvmc_get_free_hmc_buffer(struct crq_server_adapter *adapter, + u8 hmc_index) +{ + struct ibmvmc_buffer *buffer; + struct ibmvmc_buffer *ret_buf = NULL; + unsigned long i; + + if (hmc_index > ibmvmc.max_hmc_index) { + dev_info(adapter->dev, "get_free_hmc_buffer: invalid hmc_index=0x%x\n", + hmc_index); + return NULL; + } + + buffer = hmcs[hmc_index].buffer; + + for (i = 0; i < ibmvmc_max_buf_pool_size; i++) { + if (buffer[i].free && + buffer[i].owner == VMC_BUF_OWNER_ALPHA) { + buffer[i].free = 0; + ret_buf = &buffer[i]; + break; + } + } + + return ret_buf; +} + +/** + * ibmvmc_free_hmc_buffer - Free an HMC Buffer + * + * @hmc: ibmvmc_hmc struct + * @buffer: ibmvmc_buffer struct + * + */ +static void ibmvmc_free_hmc_buffer(struct ibmvmc_hmc *hmc, + struct ibmvmc_buffer *buffer) +{ + unsigned long flags; + + spin_lock_irqsave(&hmc->lock, flags); + buffer->free = 1; + spin_unlock_irqrestore(&hmc->lock, flags); +} + +/** + * ibmvmc_count_hmc_buffers - Count HMC Buffers + * + * @hmc_index: HMC Index field + * @valid: Valid number of buffers field + * @free: Free number of buffers field + * + */ +static void ibmvmc_count_hmc_buffers(u8 hmc_index, unsigned int *valid, + unsigned int *free) +{ + struct ibmvmc_buffer *buffer; + unsigned long i; + unsigned long flags; + + if (hmc_index > ibmvmc.max_hmc_index) + return; + + if (!valid || !free) + return; + + *valid = 0; *free = 0; + + buffer = hmcs[hmc_index].buffer; + spin_lock_irqsave(&hmcs[hmc_index].lock, flags); + + for (i = 0; i < ibmvmc_max_buf_pool_size; i++) { + if (buffer[i].valid) { + *valid = *valid + 1; + if (buffer[i].free) + *free = *free + 1; + } + } + + spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags); +} + +/** + * ibmvmc_get_free_hmc - Get Free HMC + * + * Return: + * Pointer to an available HMC Connection + * Null otherwise + */ +static struct ibmvmc_hmc *ibmvmc_get_free_hmc(void) +{ + unsigned long i; + unsigned long flags; + + /* + * Find an available HMC connection. 
+ */ + for (i = 0; i <= ibmvmc.max_hmc_index; i++) { + spin_lock_irqsave(&hmcs[i].lock, flags); + if (hmcs[i].state == ibmhmc_state_free) { + hmcs[i].index = i; + hmcs[i].state = ibmhmc_state_initial; + spin_unlock_irqrestore(&hmcs[i].lock, flags); + return &hmcs[i]; + } + spin_unlock_irqrestore(&hmcs[i].lock, flags); + } + + return NULL; +} + +/** + * ibmvmc_return_hmc - Return an HMC Connection + * + * @hmc: ibmvmc_hmc struct + * @release_readers: Number of readers connected to session + * + * This function releases the HMC connections back into the pool. + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static int ibmvmc_return_hmc(struct ibmvmc_hmc *hmc, bool release_readers) +{ + struct ibmvmc_buffer *buffer; + struct crq_server_adapter *adapter; + struct vio_dev *vdev; + unsigned long i; + unsigned long flags; + + if (!hmc || !hmc->adapter) + return -EIO; + + if (release_readers) { + if (hmc->file_session) { + struct ibmvmc_file_session *session = hmc->file_session; + + session->valid = 0; + wake_up_interruptible(&ibmvmc_read_wait); + } + } + + adapter = hmc->adapter; + vdev = to_vio_dev(adapter->dev); + + spin_lock_irqsave(&hmc->lock, flags); + hmc->index = 0; + hmc->state = ibmhmc_state_free; + hmc->queue_head = 0; + hmc->queue_tail = 0; + buffer = hmc->buffer; + for (i = 0; i < ibmvmc_max_buf_pool_size; i++) { + if (buffer[i].valid) { + free_dma_buffer(vdev, + ibmvmc.max_mtu, + buffer[i].real_addr_local, + buffer[i].dma_addr_local); + dev_dbg(adapter->dev, "Forgot buffer id 0x%lx\n", i); + } + memset(&buffer[i], 0, sizeof(struct ibmvmc_buffer)); + + hmc->queue_outbound_msgs[i] = VMC_INVALID_BUFFER_ID; + } + + spin_unlock_irqrestore(&hmc->lock, flags); + + return 0; +} + +/** + * ibmvmc_send_open - Interface Open + * @buffer: Pointer to ibmvmc_buffer struct + * @hmc: Pointer to ibmvmc_hmc struct + * + * This command is sent by the management partition as the result of a + * management partition device request. It causes the hypervisor to + * prepare a set of data buffers for the management application connection + * indicated HMC idx. A unique HMC Idx would be used if multiple management + * applications running concurrently were desired. Before responding to this + * command, the hypervisor must provide the management partition with at + * least one of these new buffers via the Add Buffer. This indicates whether + * the messages are inbound or outbound from the hypervisor. 
+ * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static int ibmvmc_send_open(struct ibmvmc_buffer *buffer, + struct ibmvmc_hmc *hmc) +{ + struct ibmvmc_crq_msg crq_msg; + struct crq_server_adapter *adapter; + __be64 *crq_as_u64 = (__be64 *)&crq_msg; + int rc = 0; + + if (!hmc || !hmc->adapter) + return -EIO; + + adapter = hmc->adapter; + + dev_dbg(adapter->dev, "send_open: 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n", + (unsigned long)buffer->size, (unsigned long)adapter->liobn, + (unsigned long)buffer->dma_addr_local, + (unsigned long)adapter->riobn, + (unsigned long)buffer->dma_addr_remote); + + rc = h_copy_rdma(buffer->size, + adapter->liobn, + buffer->dma_addr_local, + adapter->riobn, + buffer->dma_addr_remote); + if (rc) { + dev_err(adapter->dev, "Error: In send_open, h_copy_rdma rc 0x%x\n", + rc); + return -EIO; + } + + hmc->state = ibmhmc_state_opening; + + crq_msg.valid = 0x80; + crq_msg.type = VMC_MSG_OPEN; + crq_msg.status = 0; + crq_msg.var1.rsvd = 0; + crq_msg.hmc_session = hmc->session; + crq_msg.hmc_index = hmc->index; + crq_msg.var2.buffer_id = cpu_to_be16(buffer->id); + crq_msg.rsvd = 0; + crq_msg.var3.rsvd = 0; + + ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]), + be64_to_cpu(crq_as_u64[1])); + + return rc; +} + +/** + * ibmvmc_send_close - Interface Close + * @hmc: Pointer to ibmvmc_hmc struct + * + * This command is sent by the management partition to terminate a + * management application to hypervisor connection. When this command is + * sent, the management partition has quiesced all I/O operations to all + * buffers associated with this management application connection, and + * has freed any storage for these buffers. + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static int ibmvmc_send_close(struct ibmvmc_hmc *hmc) +{ + struct ibmvmc_crq_msg crq_msg; + struct crq_server_adapter *adapter; + __be64 *crq_as_u64 = (__be64 *)&crq_msg; + int rc = 0; + + if (!hmc || !hmc->adapter) + return -EIO; + + adapter = hmc->adapter; + + dev_info(adapter->dev, "CRQ send: close\n"); + + crq_msg.valid = 0x80; + crq_msg.type = VMC_MSG_CLOSE; + crq_msg.status = 0; + crq_msg.var1.rsvd = 0; + crq_msg.hmc_session = hmc->session; + crq_msg.hmc_index = hmc->index; + crq_msg.var2.rsvd = 0; + crq_msg.rsvd = 0; + crq_msg.var3.rsvd = 0; + + ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]), + be64_to_cpu(crq_as_u64[1])); + + return rc; +} + +/** + * ibmvmc_send_capabilities - Send VMC Capabilities + * + * @adapter: crq_server_adapter struct + * + * The capabilities message is an administrative message sent after the CRQ + * initialization sequence of messages and is used to exchange VMC capabilities + * between the management partition and the hypervisor. The management + * partition must send this message and the hypervisor must respond with VMC + * capabilities Response message before HMC interface message can begin. Any + * HMC interface messages received before the exchange of capabilities has + * complete are dropped. 
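Both senders above use the same packing idiom: the 16-byte CRQ message is assembled in a struct whose multi-byte fields are big endian, the struct is then viewed as two __be64 words, and each half is converted with be64_to_cpu() so the bytes cross the register-based H_SEND_CRQ interface in the intended order. Stripped down to that idiom, a close message is built roughly as follows (a sketch of the code above, not a new function in the patch):

    static void example_send_close(struct crq_server_adapter *adapter,
                                   struct ibmvmc_hmc *hmc)
    {
            struct ibmvmc_crq_msg msg;
            __be64 *words = (__be64 *)&msg;

            memset(&msg, 0, sizeof(msg));
            msg.valid = 0x80;               /* 0x80 marks a valid payload entry */
            msg.type = VMC_MSG_CLOSE;
            msg.hmc_session = hmc->session;
            msg.hmc_index = hmc->index;

            /* the message is exactly two 64-bit words; pass them by value */
            ibmvmc_send_crq(adapter, be64_to_cpu(words[0]),
                            be64_to_cpu(words[1]));
    }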
+ * + * Return: + * 0 - Success + */ +static int ibmvmc_send_capabilities(struct crq_server_adapter *adapter) +{ + struct ibmvmc_admin_crq_msg crq_msg; + __be64 *crq_as_u64 = (__be64 *)&crq_msg; + + dev_dbg(adapter->dev, "ibmvmc: CRQ send: capabilities\n"); + crq_msg.valid = 0x80; + crq_msg.type = VMC_MSG_CAP; + crq_msg.status = 0; + crq_msg.rsvd[0] = 0; + crq_msg.rsvd[1] = 0; + crq_msg.max_hmc = ibmvmc_max_hmcs; + crq_msg.max_mtu = cpu_to_be32(ibmvmc_max_mtu); + crq_msg.pool_size = cpu_to_be16(ibmvmc_max_buf_pool_size); + crq_msg.crq_size = cpu_to_be16(adapter->queue.size); + crq_msg.version = cpu_to_be16(IBMVMC_PROTOCOL_VERSION); + + ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]), + be64_to_cpu(crq_as_u64[1])); + + ibmvmc.state = ibmvmc_state_capabilities; + + return 0; +} + +/** + * ibmvmc_send_add_buffer_resp - Add Buffer Response + * + * @adapter: crq_server_adapter struct + * @status: Status field + * @hmc_session: HMC Session field + * @hmc_index: HMC Index field + * @buffer_id: Buffer Id field + * + * This command is sent by the management partition to the hypervisor in + * response to the Add Buffer message. The Status field indicates the result of + * the command. + * + * Return: + * 0 - Success + */ +static int ibmvmc_send_add_buffer_resp(struct crq_server_adapter *adapter, + u8 status, u8 hmc_session, + u8 hmc_index, u16 buffer_id) +{ + struct ibmvmc_crq_msg crq_msg; + __be64 *crq_as_u64 = (__be64 *)&crq_msg; + + dev_dbg(adapter->dev, "CRQ send: add_buffer_resp\n"); + crq_msg.valid = 0x80; + crq_msg.type = VMC_MSG_ADD_BUF_RESP; + crq_msg.status = status; + crq_msg.var1.rsvd = 0; + crq_msg.hmc_session = hmc_session; + crq_msg.hmc_index = hmc_index; + crq_msg.var2.buffer_id = cpu_to_be16(buffer_id); + crq_msg.rsvd = 0; + crq_msg.var3.rsvd = 0; + + ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]), + be64_to_cpu(crq_as_u64[1])); + + return 0; +} + +/** + * ibmvmc_send_rem_buffer_resp - Remove Buffer Response + * + * @adapter: crq_server_adapter struct + * @status: Status field + * @hmc_session: HMC Session field + * @hmc_index: HMC Index field + * @buffer_id: Buffer Id field + * + * This command is sent by the management partition to the hypervisor in + * response to the Remove Buffer message. The Buffer ID field indicates + * which buffer the management partition selected to remove. The Status + * field indicates the result of the command. + * + * Return: + * 0 - Success + */ +static int ibmvmc_send_rem_buffer_resp(struct crq_server_adapter *adapter, + u8 status, u8 hmc_session, + u8 hmc_index, u16 buffer_id) +{ + struct ibmvmc_crq_msg crq_msg; + __be64 *crq_as_u64 = (__be64 *)&crq_msg; + + dev_dbg(adapter->dev, "CRQ send: rem_buffer_resp\n"); + crq_msg.valid = 0x80; + crq_msg.type = VMC_MSG_REM_BUF_RESP; + crq_msg.status = status; + crq_msg.var1.rsvd = 0; + crq_msg.hmc_session = hmc_session; + crq_msg.hmc_index = hmc_index; + crq_msg.var2.buffer_id = cpu_to_be16(buffer_id); + crq_msg.rsvd = 0; + crq_msg.var3.rsvd = 0; + + ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]), + be64_to_cpu(crq_as_u64[1])); + + return 0; +} + +/** + * ibmvmc_send_msg - Signal Message + * + * @adapter: crq_server_adapter struct + * @buffer: ibmvmc_buffer struct + * @hmc: ibmvmc_hmc struct + * @msg_length: message length field + * + * This command is sent between the management partition and the hypervisor + * in order to signal the arrival of an HMC protocol message. The command + * can be sent by both the management partition and the hypervisor. 
It is + * used for all traffic between the management application and the hypervisor, + * regardless of who initiated the communication. + * + * There is no response to this message. + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static int ibmvmc_send_msg(struct crq_server_adapter *adapter, + struct ibmvmc_buffer *buffer, + struct ibmvmc_hmc *hmc, int msg_len) +{ + struct ibmvmc_crq_msg crq_msg; + __be64 *crq_as_u64 = (__be64 *)&crq_msg; + int rc = 0; + + dev_dbg(adapter->dev, "CRQ send: rdma to HV\n"); + rc = h_copy_rdma(msg_len, + adapter->liobn, + buffer->dma_addr_local, + adapter->riobn, + buffer->dma_addr_remote); + if (rc) { + dev_err(adapter->dev, "Error in send_msg, h_copy_rdma rc 0x%x\n", + rc); + return rc; + } + + crq_msg.valid = 0x80; + crq_msg.type = VMC_MSG_SIGNAL; + crq_msg.status = 0; + crq_msg.var1.rsvd = 0; + crq_msg.hmc_session = hmc->session; + crq_msg.hmc_index = hmc->index; + crq_msg.var2.buffer_id = cpu_to_be16(buffer->id); + crq_msg.var3.msg_len = cpu_to_be32(msg_len); + dev_dbg(adapter->dev, "CRQ send: msg to HV 0x%llx 0x%llx\n", + be64_to_cpu(crq_as_u64[0]), be64_to_cpu(crq_as_u64[1])); + + buffer->owner = VMC_BUF_OWNER_HV; + ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]), + be64_to_cpu(crq_as_u64[1])); + + return rc; +} + +/** + * ibmvmc_open - Open Session + * + * @inode: inode struct + * @file: file struct + * + * Return: + * 0 - Success + */ +static int ibmvmc_open(struct inode *inode, struct file *file) +{ + struct ibmvmc_file_session *session; + int rc = 0; + + pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__, + (unsigned long)inode, (unsigned long)file, + ibmvmc.state); + + session = kzalloc(sizeof(*session), GFP_KERNEL); + session->file = file; + file->private_data = session; + + return rc; +} + +/** + * ibmvmc_close - Close Session + * + * @inode: inode struct + * @file: file struct + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static int ibmvmc_close(struct inode *inode, struct file *file) +{ + struct ibmvmc_file_session *session; + struct ibmvmc_hmc *hmc; + int rc = 0; + unsigned long flags; + + pr_debug("%s: file = 0x%lx, state = 0x%x\n", __func__, + (unsigned long)file, ibmvmc.state); + + session = file->private_data; + if (!session) + return -EIO; + + hmc = session->hmc; + if (hmc) { + if (!hmc->adapter) + return -EIO; + + if (ibmvmc.state == ibmvmc_state_failed) { + dev_warn(hmc->adapter->dev, "close: state_failed\n"); + return -EIO; + } + + spin_lock_irqsave(&hmc->lock, flags); + if (hmc->state >= ibmhmc_state_opening) { + rc = ibmvmc_send_close(hmc); + if (rc) + dev_warn(hmc->adapter->dev, "close: send_close failed.\n"); + } + spin_unlock_irqrestore(&hmc->lock, flags); + } + + kzfree(session); + + return rc; +} + +/** + * ibmvmc_read - Read + * + * @file: file struct + * @buf: Character buffer + * @nbytes: Size in bytes + * @ppos: Offset + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static ssize_t ibmvmc_read(struct file *file, char *buf, size_t nbytes, + loff_t *ppos) +{ + struct ibmvmc_file_session *session; + struct ibmvmc_hmc *hmc; + struct crq_server_adapter *adapter; + struct ibmvmc_buffer *buffer; + ssize_t n; + ssize_t retval = 0; + unsigned long flags; + DEFINE_WAIT(wait); + + pr_debug("ibmvmc: read: file = 0x%lx, buf = 0x%lx, nbytes = 0x%lx\n", + (unsigned long)file, (unsigned long)buf, + (unsigned long)nbytes); + + if (nbytes == 0) + return 0; + + if (nbytes > ibmvmc.max_mtu) { + pr_warn("ibmvmc: read: nbytes invalid 0x%x\n", + (unsigned int)nbytes); + return -EINVAL; 
+ } + + session = file->private_data; + if (!session) { + pr_warn("ibmvmc: read: no session\n"); + return -EIO; + } + + hmc = session->hmc; + if (!hmc) { + pr_warn("ibmvmc: read: no hmc\n"); + return -EIO; + } + + adapter = hmc->adapter; + if (!adapter) { + pr_warn("ibmvmc: read: no adapter\n"); + return -EIO; + } + + do { + prepare_to_wait(&ibmvmc_read_wait, &wait, TASK_INTERRUPTIBLE); + + spin_lock_irqsave(&hmc->lock, flags); + if (hmc->queue_tail != hmc->queue_head) + /* Data is available */ + break; + + spin_unlock_irqrestore(&hmc->lock, flags); + + if (!session->valid) { + retval = -EBADFD; + goto out; + } + if (file->f_flags & O_NONBLOCK) { + retval = -EAGAIN; + goto out; + } + + schedule(); + + if (signal_pending(current)) { + retval = -ERESTARTSYS; + goto out; + } + } while (1); + + buffer = &(hmc->buffer[hmc->queue_outbound_msgs[hmc->queue_tail]]); + hmc->queue_tail++; + if (hmc->queue_tail == ibmvmc_max_buf_pool_size) + hmc->queue_tail = 0; + spin_unlock_irqrestore(&hmc->lock, flags); + + nbytes = min_t(size_t, nbytes, buffer->msg_len); + n = copy_to_user((void *)buf, buffer->real_addr_local, nbytes); + dev_dbg(adapter->dev, "read: copy to user nbytes = 0x%lx.\n", nbytes); + ibmvmc_free_hmc_buffer(hmc, buffer); + retval = nbytes; + + if (n) { + dev_warn(adapter->dev, "read: copy to user failed.\n"); + retval = -EFAULT; + } + + out: + finish_wait(&ibmvmc_read_wait, &wait); + dev_dbg(adapter->dev, "read: out %ld\n", retval); + return retval; +} + +/** + * ibmvmc_poll - Poll + * + * @file: file struct + * @wait: Poll Table + * + * Return: + * poll.h return values + */ +static unsigned int ibmvmc_poll(struct file *file, poll_table *wait) +{ + struct ibmvmc_file_session *session; + struct ibmvmc_hmc *hmc; + unsigned int mask = 0; + + session = file->private_data; + if (!session) + return 0; + + hmc = session->hmc; + if (!hmc) + return 0; + + poll_wait(file, &ibmvmc_read_wait, wait); + + if (hmc->queue_head != hmc->queue_tail) + mask |= POLLIN | POLLRDNORM; + + return mask; +} + +/** + * ibmvmc_write - Write + * + * @file: file struct + * @buf: Character buffer + * @count: Count field + * @ppos: Offset + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static ssize_t ibmvmc_write(struct file *file, const char *buffer, + size_t count, loff_t *ppos) +{ + struct ibmvmc_buffer *vmc_buffer; + struct ibmvmc_file_session *session; + struct crq_server_adapter *adapter; + struct ibmvmc_hmc *hmc; + unsigned char *buf; + unsigned long flags; + size_t bytes; + const char *p = buffer; + size_t c = count; + int ret = 0; + + session = file->private_data; + if (!session) + return -EIO; + + hmc = session->hmc; + if (!hmc) + return -EIO; + + spin_lock_irqsave(&hmc->lock, flags); + if (hmc->state == ibmhmc_state_free) { + /* HMC connection is not valid (possibly was reset under us). */ + ret = -EIO; + goto out; + } + + adapter = hmc->adapter; + if (!adapter) { + ret = -EIO; + goto out; + } + + if (count > ibmvmc.max_mtu) { + dev_warn(adapter->dev, "invalid buffer size 0x%lx\n", + (unsigned long)count); + ret = -EIO; + goto out; + } + + /* Waiting for the open resp message to the ioctl(1) - retry */ + if (hmc->state == ibmhmc_state_opening) { + ret = -EBUSY; + goto out; + } + + /* Make sure the ioctl() was called & the open msg sent, and that + * the HMC connection has not failed. 
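Seen from user space, the read/write/poll handlers above plus the ioctls that follow form a small session protocol: open the device, bind an HMC id, then move one buffered message per read() or write(). A hypothetical client might look like the sketch below; the device node name and the locally mirrored ioctl number are assumptions, since the patch does not export a uapi header:

    #include <fcntl.h>
    #include <poll.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    #define VMC_IOCTL_SETHMCID _IOW(0xCC, 0x00, unsigned char *) /* mirrors ibmvmc.h */

    static int example_session(void)
    {
            unsigned char id[32] = "EXAMPLE-HMC";   /* HMC_ID_LEN bytes */
            char buf[4096];
            struct pollfd pfd;
            ssize_t n;
            int fd;

            fd = open("/dev/ibmvmc", O_RDWR);       /* misc device registered below */
            if (fd < 0)
                    return -1;

            /* reserve an HMC slot and trigger the open message to the hypervisor */
            if (ioctl(fd, VMC_IOCTL_SETHMCID, id) < 0)
                    return -1;

            /* block until an inbound HMC message has been queued, then fetch it */
            pfd.fd = fd;
            pfd.events = POLLIN;
            poll(&pfd, 1, -1);
            n = read(fd, buf, sizeof(buf));         /* at most one message per read */

            /* writes are capped at the negotiated MTU; -EBUSY means the open
             * response has not arrived yet and the write should be retried */
            if (n > 0)
                    n = write(fd, buf, n);

            close(fd);
            return 0;
    }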
+ */ + if (hmc->state != ibmhmc_state_ready) { + ret = -EIO; + goto out; + } + + vmc_buffer = ibmvmc_get_valid_hmc_buffer(hmc->index); + if (!vmc_buffer) { + /* No buffer available for the msg send, or we have not yet + * completed the open/open_resp sequence. Retry until this is + * complete. + */ + ret = -EBUSY; + goto out; + } + if (!vmc_buffer->real_addr_local) { + dev_err(adapter->dev, "no buffer storage assigned\n"); + ret = -EIO; + goto out; + } + buf = vmc_buffer->real_addr_local; + + while (c > 0) { + bytes = min_t(size_t, c, vmc_buffer->size); + + bytes -= copy_from_user(buf, p, bytes); + if (!bytes) { + ret = -EFAULT; + goto out; + } + c -= bytes; + p += bytes; + } + if (p == buffer) + goto out; + + file->f_path.dentry->d_inode->i_mtime = current_time(file_inode(file)); + mark_inode_dirty(file->f_path.dentry->d_inode); + + dev_dbg(adapter->dev, "write: file = 0x%lx, count = 0x%lx\n", + (unsigned long)file, (unsigned long)count); + + ibmvmc_send_msg(adapter, vmc_buffer, hmc, count); + ret = p - buffer; + out: + spin_unlock_irqrestore(&hmc->lock, flags); + return (ssize_t)(ret); +} + +/** + * ibmvmc_setup_hmc - Setup the HMC + * + * @session: ibmvmc_file_session struct + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static long ibmvmc_setup_hmc(struct ibmvmc_file_session *session) +{ + struct ibmvmc_hmc *hmc; + unsigned int valid, free, index; + + if (ibmvmc.state == ibmvmc_state_failed) { + pr_warn("ibmvmc: Reserve HMC: state_failed\n"); + return -EIO; + } + + if (ibmvmc.state < ibmvmc_state_ready) { + pr_warn("ibmvmc: Reserve HMC: not state_ready\n"); + return -EAGAIN; + } + + /* Device is busy until capabilities have been exchanged and we + * have a generic buffer for each possible HMC connection. + */ + for (index = 0; index <= ibmvmc.max_hmc_index; index++) { + valid = 0; + ibmvmc_count_hmc_buffers(index, &valid, &free); + if (valid == 0) { + pr_warn("ibmvmc: buffers not ready for index %d\n", + index); + return -ENOBUFS; + } + } + + /* Get an hmc object, and transition to ibmhmc_state_initial */ + hmc = ibmvmc_get_free_hmc(); + if (!hmc) { + pr_warn("%s: free hmc not found\n", __func__); + return -EBUSY; + } + + hmc->session = hmc->session + 1; + if (hmc->session == 0xff) + hmc->session = 1; + + session->hmc = hmc; + hmc->adapter = &ibmvmc_adapter; + hmc->file_session = session; + session->valid = 1; + + return 0; +} + +/** + * ibmvmc_ioctl_sethmcid - IOCTL Set HMC ID + * + * @session: ibmvmc_file_session struct + * @new_hmc_id: HMC id field + * + * IOCTL command to setup the hmc id + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static long ibmvmc_ioctl_sethmcid(struct ibmvmc_file_session *session, + unsigned char __user *new_hmc_id) +{ + struct ibmvmc_hmc *hmc; + struct ibmvmc_buffer *buffer; + size_t bytes; + char print_buffer[HMC_ID_LEN + 1]; + unsigned long flags; + long rc = 0; + + /* Reserve HMC session */ + hmc = session->hmc; + if (!hmc) { + rc = ibmvmc_setup_hmc(session); + if (rc) + return rc; + + hmc = session->hmc; + if (!hmc) { + pr_err("ibmvmc: setup_hmc success but no hmc\n"); + return -EIO; + } + } + + if (hmc->state != ibmhmc_state_initial) { + pr_warn("ibmvmc: sethmcid: invalid state to send open 0x%x\n", + hmc->state); + return -EIO; + } + + bytes = copy_from_user(hmc->hmc_id, new_hmc_id, HMC_ID_LEN); + if (bytes) + return -EFAULT; + + /* Send Open Session command */ + spin_lock_irqsave(&hmc->lock, flags); + buffer = ibmvmc_get_valid_hmc_buffer(hmc->index); + spin_unlock_irqrestore(&hmc->lock, flags); + + if (!buffer || 
!buffer->real_addr_local) { + pr_warn("ibmvmc: sethmcid: no buffer available\n"); + return -EIO; + } + + /* Make sure buffer is NULL terminated before trying to print it */ + memset(print_buffer, 0, HMC_ID_LEN + 1); + strncpy(print_buffer, hmc->hmc_id, HMC_ID_LEN); + pr_info("ibmvmc: sethmcid: Set HMC ID: \"%s\"\n", print_buffer); + + memcpy(buffer->real_addr_local, hmc->hmc_id, HMC_ID_LEN); + /* RDMA over ID, send open msg, change state to ibmhmc_state_opening */ + rc = ibmvmc_send_open(buffer, hmc); + + return rc; +} + +/** + * ibmvmc_ioctl_query - IOCTL Query + * + * @session: ibmvmc_file_session struct + * @ret_struct: ibmvmc_query_struct + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static long ibmvmc_ioctl_query(struct ibmvmc_file_session *session, + struct ibmvmc_query_struct __user *ret_struct) +{ + struct ibmvmc_query_struct query_struct; + size_t bytes; + + memset(&query_struct, 0, sizeof(query_struct)); + query_struct.have_vmc = (ibmvmc.state > ibmvmc_state_initial); + query_struct.state = ibmvmc.state; + query_struct.vmc_drc_index = ibmvmc.vmc_drc_index; + + bytes = copy_to_user(ret_struct, &query_struct, + sizeof(query_struct)); + if (bytes) + return -EFAULT; + + return 0; +} + +/** + * ibmvmc_ioctl_requestvmc - IOCTL Request VMC + * + * @session: ibmvmc_file_session struct + * @ret_vmc_index: VMC Index + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static long ibmvmc_ioctl_requestvmc(struct ibmvmc_file_session *session, + u32 __user *ret_vmc_index) +{ + /* TODO: (adreznec) Add locking to control multiple process access */ + size_t bytes; + long rc; + u32 vmc_drc_index; + + /* Call to request the VMC device from phyp*/ + rc = h_request_vmc(&vmc_drc_index); + pr_debug("ibmvmc: requestvmc: H_REQUEST_VMC rc = 0x%lx\n", rc); + + if (rc == H_SUCCESS) { + rc = 0; + } else if (rc == H_FUNCTION) { + pr_err("ibmvmc: requestvmc: h_request_vmc not supported\n"); + return -EPERM; + } else if (rc == H_AUTHORITY) { + pr_err("ibmvmc: requestvmc: hypervisor denied vmc request\n"); + return -EPERM; + } else if (rc == H_HARDWARE) { + pr_err("ibmvmc: requestvmc: hypervisor hardware fault\n"); + return -EIO; + } else if (rc == H_RESOURCE) { + pr_err("ibmvmc: requestvmc: vmc resource unavailable\n"); + return -ENODEV; + } else if (rc == H_NOT_AVAILABLE) { + pr_err("ibmvmc: requestvmc: system cannot be vmc managed\n"); + return -EPERM; + } else if (rc == H_PARAMETER) { + pr_err("ibmvmc: requestvmc: invalid parameter\n"); + return -EINVAL; + } + + /* Success, set the vmc index in global struct */ + ibmvmc.vmc_drc_index = vmc_drc_index; + + bytes = copy_to_user(ret_vmc_index, &vmc_drc_index, + sizeof(*ret_vmc_index)); + if (bytes) { + pr_warn("ibmvmc: requestvmc: copy to user failed.\n"); + return -EFAULT; + } + return rc; +} + +/** + * ibmvmc_ioctl - IOCTL + * + * @session: ibmvmc_file_session struct + * @cmd: cmd field + * @arg: Argument field + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static long ibmvmc_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct ibmvmc_file_session *session = file->private_data; + + pr_debug("ibmvmc: ioctl file=0x%lx, cmd=0x%x, arg=0x%lx, ses=0x%lx\n", + (unsigned long)file, cmd, arg, + (unsigned long)session); + + if (!session) { + pr_warn("ibmvmc: ioctl: no session\n"); + return -EIO; + } + + switch (cmd) { + case VMC_IOCTL_SETHMCID: + return ibmvmc_ioctl_sethmcid(session, + (unsigned char __user *)arg); + case VMC_IOCTL_QUERY: + return ibmvmc_ioctl_query(session, + (struct ibmvmc_query_struct __user 
*)arg); + case VMC_IOCTL_REQUESTVMC: + return ibmvmc_ioctl_requestvmc(session, + (unsigned int __user *)arg); + default: + pr_warn("ibmvmc: unknown ioctl 0x%x\n", cmd); + return -EINVAL; + } +} + +static const struct file_operations ibmvmc_fops = { + .owner = THIS_MODULE, + .read = ibmvmc_read, + .write = ibmvmc_write, + .poll = ibmvmc_poll, + .unlocked_ioctl = ibmvmc_ioctl, + .open = ibmvmc_open, + .release = ibmvmc_close, +}; + +/** + * ibmvmc_add_buffer - Add Buffer + * + * @adapter: crq_server_adapter struct + * @crq: ibmvmc_crq_msg struct + * + * This message transfers a buffer from hypervisor ownership to management + * partition ownership. The LIOBA is obtained from the virtual TCE table + * associated with the hypervisor side of the VMC device, and points to a + * buffer of size MTU (as established in the capabilities exchange). + * + * Typical flow for ading buffers: + * 1. A new management application connection is opened by the management + * partition. + * 2. The hypervisor assigns new buffers for the traffic associated with + * that connection. + * 3. The hypervisor sends VMC Add Buffer messages to the management + * partition, informing it of the new buffers. + * 4. The hypervisor sends an HMC protocol message (to the management + * application) notifying it of the new buffers. This informs the + * application that it has buffers available for sending HMC + * commands. + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static int ibmvmc_add_buffer(struct crq_server_adapter *adapter, + struct ibmvmc_crq_msg *crq) +{ + struct ibmvmc_buffer *buffer; + u8 hmc_index; + u8 hmc_session; + u16 buffer_id; + unsigned long flags; + int rc = 0; + + if (!crq) + return -1; + + hmc_session = crq->hmc_session; + hmc_index = crq->hmc_index; + buffer_id = be16_to_cpu(crq->var2.buffer_id); + + if (hmc_index > ibmvmc.max_hmc_index) { + dev_err(adapter->dev, "add_buffer: invalid hmc_index = 0x%x\n", + hmc_index); + ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX, + hmc_session, hmc_index, buffer_id); + return -1; + } + + if (buffer_id >= ibmvmc.max_buffer_pool_size) { + dev_err(adapter->dev, "add_buffer: invalid buffer_id = 0x%x\n", + buffer_id); + ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID, + hmc_session, hmc_index, buffer_id); + return -1; + } + + spin_lock_irqsave(&hmcs[hmc_index].lock, flags); + buffer = &hmcs[hmc_index].buffer[buffer_id]; + + if (buffer->real_addr_local || buffer->dma_addr_local) { + dev_warn(adapter->dev, "add_buffer: already allocated id = 0x%lx\n", + (unsigned long)buffer_id); + spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags); + ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID, + hmc_session, hmc_index, buffer_id); + return -1; + } + + buffer->real_addr_local = alloc_dma_buffer(to_vio_dev(adapter->dev), + ibmvmc.max_mtu, + &buffer->dma_addr_local); + + if (!buffer->real_addr_local) { + dev_err(adapter->dev, "add_buffer: alloc_dma_buffer failed.\n"); + spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags); + ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INTERFACE_FAILURE, + hmc_session, hmc_index, buffer_id); + return -1; + } + + buffer->dma_addr_remote = be32_to_cpu(crq->var3.lioba); + buffer->size = ibmvmc.max_mtu; + buffer->owner = crq->var1.owner; + buffer->free = 1; + /* Must ensure valid==1 is observable only after all other fields are */ + dma_wmb(); + buffer->valid = 1; + buffer->id = buffer_id; + + dev_dbg(adapter->dev, "add_buffer: successfully added a buffer:\n"); + dev_dbg(adapter->dev, " index: %d, 
session: %d, buffer: 0x%x, owner: %d\n", + hmc_index, hmc_session, buffer_id, buffer->owner); + dev_dbg(adapter->dev, " local: 0x%x, remote: 0x%x\n", + (u32)buffer->dma_addr_local, + (u32)buffer->dma_addr_remote); + spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags); + + ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_SUCCESS, hmc_session, + hmc_index, buffer_id); + + return rc; +} + +/** + * ibmvmc_rem_buffer - Remove Buffer + * + * @adapter: crq_server_adapter struct + * @crq: ibmvmc_crq_msg struct + * + * This message requests an HMC buffer to be transferred from management + * partition ownership to hypervisor ownership. The management partition may + * not be able to satisfy the request at a particular point in time if all its + * buffers are in use. The management partition requires a depth of at least + * one inbound buffer to allow management application commands to flow to the + * hypervisor. It is, therefore, an interface error for the hypervisor to + * attempt to remove the management partition's last buffer. + * + * The hypervisor is expected to manage buffer usage with the management + * application directly and inform the management partition when buffers may be + * removed. The typical flow for removing buffers: + * + * 1. The management application no longer needs a communication path to a + * particular hypervisor function. That function is closed. + * 2. The hypervisor and the management application quiesce all traffic to that + * function. The hypervisor requests a reduction in buffer pool size. + * 3. The management application acknowledges the reduction in buffer pool size. + * 4. The hypervisor sends a Remove Buffer message to the management partition, + * informing it of the reduction in buffers. + * 5. The management partition verifies it can remove the buffer. This is + * possible if buffers have been quiesced. + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +/* + * The hypervisor requested that we pick an unused buffer, and return it. + * Before sending the buffer back, we free any storage associated with the + * buffer. 
+ */ +static int ibmvmc_rem_buffer(struct crq_server_adapter *adapter, + struct ibmvmc_crq_msg *crq) +{ + struct ibmvmc_buffer *buffer; + u8 hmc_index; + u8 hmc_session; + u16 buffer_id = 0; + unsigned long flags; + int rc = 0; + + if (!crq) + return -1; + + hmc_session = crq->hmc_session; + hmc_index = crq->hmc_index; + + if (hmc_index > ibmvmc.max_hmc_index) { + dev_warn(adapter->dev, "rem_buffer: invalid hmc_index = 0x%x\n", + hmc_index); + ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX, + hmc_session, hmc_index, buffer_id); + return -1; + } + + spin_lock_irqsave(&hmcs[hmc_index].lock, flags); + buffer = ibmvmc_get_free_hmc_buffer(adapter, hmc_index); + if (!buffer) { + dev_info(adapter->dev, "rem_buffer: no buffer to remove\n"); + spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags); + ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_NO_BUFFER, + hmc_session, hmc_index, + VMC_INVALID_BUFFER_ID); + return -1; + } + + buffer_id = buffer->id; + + if (buffer->valid) + free_dma_buffer(to_vio_dev(adapter->dev), + ibmvmc.max_mtu, + buffer->real_addr_local, + buffer->dma_addr_local); + + memset(buffer, 0, sizeof(struct ibmvmc_buffer)); + spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags); + + dev_dbg(adapter->dev, "rem_buffer: removed buffer 0x%x.\n", buffer_id); + ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_SUCCESS, hmc_session, + hmc_index, buffer_id); + + return rc; +} + +static int ibmvmc_recv_msg(struct crq_server_adapter *adapter, + struct ibmvmc_crq_msg *crq) +{ + struct ibmvmc_buffer *buffer; + struct ibmvmc_hmc *hmc; + unsigned long msg_len; + u8 hmc_index; + u8 hmc_session; + u16 buffer_id; + unsigned long flags; + int rc = 0; + + if (!crq) + return -1; + + /* Hypervisor writes CRQs directly into our memory in big endian */ + dev_dbg(adapter->dev, "Recv_msg: msg from HV 0x%016llx 0x%016llx\n", + be64_to_cpu(*((unsigned long *)crq)), + be64_to_cpu(*(((unsigned long *)crq) + 1))); + + hmc_session = crq->hmc_session; + hmc_index = crq->hmc_index; + buffer_id = be16_to_cpu(crq->var2.buffer_id); + msg_len = be32_to_cpu(crq->var3.msg_len); + + if (hmc_index > ibmvmc.max_hmc_index) { + dev_err(adapter->dev, "Recv_msg: invalid hmc_index = 0x%x\n", + hmc_index); + ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX, + hmc_session, hmc_index, buffer_id); + return -1; + } + + if (buffer_id >= ibmvmc.max_buffer_pool_size) { + dev_err(adapter->dev, "Recv_msg: invalid buffer_id = 0x%x\n", + buffer_id); + ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID, + hmc_session, hmc_index, buffer_id); + return -1; + } + + hmc = &hmcs[hmc_index]; + spin_lock_irqsave(&hmc->lock, flags); + + if (hmc->state == ibmhmc_state_free) { + dev_err(adapter->dev, "Recv_msg: invalid hmc state = 0x%x\n", + hmc->state); + /* HMC connection is not valid (possibly was reset under us). */ + spin_unlock_irqrestore(&hmc->lock, flags); + return -1; + } + + buffer = &hmc->buffer[buffer_id]; + + if (buffer->valid == 0 || buffer->owner == VMC_BUF_OWNER_ALPHA) { + dev_err(adapter->dev, "Recv_msg: not valid, or not HV. 0x%x 0x%x\n", + buffer->valid, buffer->owner); + spin_unlock_irqrestore(&hmc->lock, flags); + return -1; + } + + /* RDMA the data into the partition. 
*/ + rc = h_copy_rdma(msg_len, + adapter->riobn, + buffer->dma_addr_remote, + adapter->liobn, + buffer->dma_addr_local); + + dev_dbg(adapter->dev, "Recv_msg: msg_len = 0x%x, buffer_id = 0x%x, queue_head = 0x%x, hmc_idx = 0x%x\n", + (unsigned int)msg_len, (unsigned int)buffer_id, + (unsigned int)hmc->queue_head, (unsigned int)hmc_index); + buffer->msg_len = msg_len; + buffer->free = 0; + buffer->owner = VMC_BUF_OWNER_ALPHA; + + if (rc) { + dev_err(adapter->dev, "Failure in recv_msg: h_copy_rdma = 0x%x\n", + rc); + spin_unlock_irqrestore(&hmc->lock, flags); + return -1; + } + + /* Must be locked because read operates on the same data */ + hmc->queue_outbound_msgs[hmc->queue_head] = buffer_id; + hmc->queue_head++; + if (hmc->queue_head == ibmvmc_max_buf_pool_size) + hmc->queue_head = 0; + + if (hmc->queue_head == hmc->queue_tail) + dev_err(adapter->dev, "outbound buffer queue wrapped.\n"); + + spin_unlock_irqrestore(&hmc->lock, flags); + + wake_up_interruptible(&ibmvmc_read_wait); + + return 0; +} + +/** + * ibmvmc_process_capabilities - Process Capabilities + * + * @adapter: crq_server_adapter struct + * @crqp: ibmvmc_crq_msg struct + * + */ +static void ibmvmc_process_capabilities(struct crq_server_adapter *adapter, + struct ibmvmc_crq_msg *crqp) +{ + struct ibmvmc_admin_crq_msg *crq = (struct ibmvmc_admin_crq_msg *)crqp; + + if ((be16_to_cpu(crq->version) >> 8) != + (IBMVMC_PROTOCOL_VERSION >> 8)) { + dev_err(adapter->dev, "init failed, incompatible versions 0x%x 0x%x\n", + be16_to_cpu(crq->version), + IBMVMC_PROTOCOL_VERSION); + ibmvmc.state = ibmvmc_state_failed; + return; + } + + ibmvmc.max_mtu = min_t(u32, ibmvmc_max_mtu, be32_to_cpu(crq->max_mtu)); + ibmvmc.max_buffer_pool_size = min_t(u16, ibmvmc_max_buf_pool_size, + be16_to_cpu(crq->pool_size)); + ibmvmc.max_hmc_index = min_t(u8, ibmvmc_max_hmcs, crq->max_hmc) - 1; + ibmvmc.state = ibmvmc_state_ready; + + dev_info(adapter->dev, "Capabilities: mtu=0x%x, pool_size=0x%x, max_hmc=0x%x\n", + ibmvmc.max_mtu, ibmvmc.max_buffer_pool_size, + ibmvmc.max_hmc_index); +} + +/** + * ibmvmc_validate_hmc_session - Validate HMC Session + * + * @adapter: crq_server_adapter struct + * @crq: ibmvmc_crq_msg struct + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static int ibmvmc_validate_hmc_session(struct crq_server_adapter *adapter, + struct ibmvmc_crq_msg *crq) +{ + unsigned char hmc_index; + + hmc_index = crq->hmc_index; + + if (crq->hmc_session == 0) + return 0; + + if (hmc_index > ibmvmc.max_hmc_index) + return -1; + + if (hmcs[hmc_index].session != crq->hmc_session) { + dev_warn(adapter->dev, "Drop, bad session: expected 0x%x, recv 0x%x\n", + hmcs[hmc_index].session, crq->hmc_session); + return -1; + } + + return 0; +} + +/** + * ibmvmc_reset - Reset + * + * @adapter: crq_server_adapter struct + * @xport_event: export_event field + * + * Closes all HMC sessions and conditionally schedules a CRQ reset. + * @xport_event: If true, the partner closed their CRQ; we don't need to reset. + * If false, we need to schedule a CRQ reset. + */ +static void ibmvmc_reset(struct crq_server_adapter *adapter, bool xport_event) +{ + int i; + + if (ibmvmc.state != ibmvmc_state_sched_reset) { + dev_info(adapter->dev, "*** Reset to initial state.\n"); + for (i = 0; i < ibmvmc_max_hmcs; i++) + ibmvmc_return_hmc(&hmcs[i], xport_event); + + if (xport_event) { + /* CRQ was closed by the partner. We don't need to do + * anything except set ourself to the correct state to + * handle init msgs. 
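The xport_event flag is what distinguishes the two reset situations described here. Both kinds of call site appear later in this patch; shown side by side for illustration only:

    /* partner closed its side of the CRQ (valid byte 0xFF): no queue reset needed */
    ibmvmc_reset(adapter, true);

    /* local failure detected (e.g. a bad open/close response): schedule a full
     * CRQ re-registration in the reset kthread */
    ibmvmc_reset(adapter, false);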
+ */ + ibmvmc.state = ibmvmc_state_crqinit; + } else { + /* The partner did not close their CRQ - instead, we're + * closing the CRQ on our end. Need to schedule this + * for process context, because CRQ reset may require a + * sleep. + * + * Setting ibmvmc.state here immediately prevents + * ibmvmc_open from completing until the reset + * completes in process context. + */ + ibmvmc.state = ibmvmc_state_sched_reset; + dev_dbg(adapter->dev, "Device reset scheduled"); + wake_up_interruptible(&adapter->reset_wait_queue); + } + } +} + +/** + * ibmvmc_reset_task - Reset Task + * + * @data: Data field + * + * Performs a CRQ reset of the VMC device in process context. + * NOTE: This function should not be called directly, use ibmvmc_reset. + */ +static int ibmvmc_reset_task(void *data) +{ + struct crq_server_adapter *adapter = data; + int rc; + + set_user_nice(current, -20); + + while (!kthread_should_stop()) { + wait_event_interruptible(adapter->reset_wait_queue, + (ibmvmc.state == ibmvmc_state_sched_reset) || + kthread_should_stop()); + + if (kthread_should_stop()) + break; + + dev_dbg(adapter->dev, "CRQ resetting in process context"); + tasklet_disable(&adapter->work_task); + + rc = ibmvmc_reset_crq_queue(adapter); + + if (rc != H_SUCCESS && rc != H_RESOURCE) { + dev_err(adapter->dev, "Error initializing CRQ. rc = 0x%x\n", + rc); + ibmvmc.state = ibmvmc_state_failed; + } else { + ibmvmc.state = ibmvmc_state_crqinit; + + if (ibmvmc_send_crq(adapter, 0xC001000000000000LL, 0) + != 0 && rc != H_RESOURCE) + dev_warn(adapter->dev, "Failed to send initialize CRQ message\n"); + } + + vio_enable_interrupts(to_vio_dev(adapter->dev)); + tasklet_enable(&adapter->work_task); + } + + return 0; +} + +/** + * ibmvmc_process_open_resp - Process Open Response + * + * @crq: ibmvmc_crq_msg struct + * @adapter: crq_server_adapter struct + * + * This command is sent by the hypervisor in response to the Interface + * Open message. When this message is received, the indicated buffer is + * again available for management partition use. + */ +static void ibmvmc_process_open_resp(struct ibmvmc_crq_msg *crq, + struct crq_server_adapter *adapter) +{ + unsigned char hmc_index; + unsigned short buffer_id; + + hmc_index = crq->hmc_index; + if (hmc_index > ibmvmc.max_hmc_index) { + /* Why would PHYP give an index > max negotiated? */ + ibmvmc_reset(adapter, false); + return; + } + + if (crq->status) { + dev_warn(adapter->dev, "open_resp: failed - status 0x%x\n", + crq->status); + ibmvmc_return_hmc(&hmcs[hmc_index], false); + return; + } + + if (hmcs[hmc_index].state == ibmhmc_state_opening) { + buffer_id = be16_to_cpu(crq->var2.buffer_id); + if (buffer_id >= ibmvmc.max_buffer_pool_size) { + dev_err(adapter->dev, "open_resp: invalid buffer_id = 0x%x\n", + buffer_id); + hmcs[hmc_index].state = ibmhmc_state_failed; + } else { + ibmvmc_free_hmc_buffer(&hmcs[hmc_index], + &hmcs[hmc_index].buffer[buffer_id]); + hmcs[hmc_index].state = ibmhmc_state_ready; + dev_dbg(adapter->dev, "open_resp: set hmc state = ready\n"); + } + } else { + dev_warn(adapter->dev, "open_resp: invalid hmc state (0x%x)\n", + hmcs[hmc_index].state); + } +} + +/** + * ibmvmc_process_close_resp - Process Close Response + * + * @crq: ibmvmc_crq_msg struct + * @adapter: crq_server_adapter struct + * + * This command is sent by the hypervisor in response to the managemant + * application Interface Close message. + * + * If the close fails, simply reset the entire driver as the state of the VMC + * must be in tough shape. 
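Pulling the open-path handlers together, the per-connection state walk driven by the code above is, in summary (states as defined in ibmvmc.h):

    /* ioctl(VMC_IOCTL_SETHMCID): free -> initial -> opening    (ibmvmc_send_open)    */
    /* VMC_MSG_OPEN_RESP, status 0: opening -> ready            (write() now allowed) */
    /* close(), VMC_MSG_CLOSE_RESP or a reset: back to free     (ibmvmc_return_hmc)   */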
+ */ +static void ibmvmc_process_close_resp(struct ibmvmc_crq_msg *crq, + struct crq_server_adapter *adapter) +{ + unsigned char hmc_index; + + hmc_index = crq->hmc_index; + if (hmc_index > ibmvmc.max_hmc_index) { + ibmvmc_reset(adapter, false); + return; + } + + if (crq->status) { + dev_warn(adapter->dev, "close_resp: failed - status 0x%x\n", + crq->status); + ibmvmc_reset(adapter, false); + return; + } + + ibmvmc_return_hmc(&hmcs[hmc_index], false); +} + +/** + * ibmvmc_crq_process - Process CRQ + * + * @adapter: crq_server_adapter struct + * @crq: ibmvmc_crq_msg struct + * + * Process the CRQ message based upon the type of message received. + * + */ +static void ibmvmc_crq_process(struct crq_server_adapter *adapter, + struct ibmvmc_crq_msg *crq) +{ + switch (crq->type) { + case VMC_MSG_CAP_RESP: + dev_dbg(adapter->dev, "CRQ recv: capabilities resp (0x%x)\n", + crq->type); + if (ibmvmc.state == ibmvmc_state_capabilities) + ibmvmc_process_capabilities(adapter, crq); + else + dev_warn(adapter->dev, "caps msg invalid in state 0x%x\n", + ibmvmc.state); + break; + case VMC_MSG_OPEN_RESP: + dev_dbg(adapter->dev, "CRQ recv: open resp (0x%x)\n", + crq->type); + if (ibmvmc_validate_hmc_session(adapter, crq) == 0) + ibmvmc_process_open_resp(crq, adapter); + break; + case VMC_MSG_ADD_BUF: + dev_dbg(adapter->dev, "CRQ recv: add buf (0x%x)\n", + crq->type); + if (ibmvmc_validate_hmc_session(adapter, crq) == 0) + ibmvmc_add_buffer(adapter, crq); + break; + case VMC_MSG_REM_BUF: + dev_dbg(adapter->dev, "CRQ recv: rem buf (0x%x)\n", + crq->type); + if (ibmvmc_validate_hmc_session(adapter, crq) == 0) + ibmvmc_rem_buffer(adapter, crq); + break; + case VMC_MSG_SIGNAL: + dev_dbg(adapter->dev, "CRQ recv: signal msg (0x%x)\n", + crq->type); + if (ibmvmc_validate_hmc_session(adapter, crq) == 0) + ibmvmc_recv_msg(adapter, crq); + break; + case VMC_MSG_CLOSE_RESP: + dev_dbg(adapter->dev, "CRQ recv: close resp (0x%x)\n", + crq->type); + if (ibmvmc_validate_hmc_session(adapter, crq) == 0) + ibmvmc_process_close_resp(crq, adapter); + break; + case VMC_MSG_CAP: + case VMC_MSG_OPEN: + case VMC_MSG_CLOSE: + case VMC_MSG_ADD_BUF_RESP: + case VMC_MSG_REM_BUF_RESP: + dev_warn(adapter->dev, "CRQ recv: unexpected msg (0x%x)\n", + crq->type); + break; + default: + dev_warn(adapter->dev, "CRQ recv: unknown msg (0x%x)\n", + crq->type); + break; + } +} + +/** + * ibmvmc_handle_crq_init - Handle CRQ Init + * + * @crq: ibmvmc_crq_msg struct + * @adapter: crq_server_adapter struct + * + * Handle the type of crq initialization based on whether + * it is a message or a response. 
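The CRQ bring-up handled here is a two-message handshake: whichever side has just (re)registered its queue sends an initialization request, the peer answers with an initialization complete, and only then does the capabilities exchange start. In terms of the helpers in this patch the sequence is (illustrative, not a new function):

    /* after H_REG_CRQ succeeds, in the probe or reset path */
    ibmvmc_send_crq(adapter, 0xC001000000000000LL, 0);      /* init request  */

    /* on receiving an init request (valid 0xC0, type 0x01) */
    ibmvmc_send_crq(adapter, 0xC002000000000000LL, 0);      /* init complete */
    ibmvmc_send_capabilities(adapter);

    /* on receiving an init complete (type 0x02) */
    ibmvmc_send_capabilities(adapter);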
+ * + */ +static void ibmvmc_handle_crq_init(struct ibmvmc_crq_msg *crq, + struct crq_server_adapter *adapter) +{ + switch (crq->type) { + case 0x01: /* Initialization message */ + dev_dbg(adapter->dev, "CRQ recv: CRQ init msg - state 0x%x\n", + ibmvmc.state); + if (ibmvmc.state == ibmvmc_state_crqinit) { + /* Send back a response */ + if (ibmvmc_send_crq(adapter, 0xC002000000000000, + 0) == 0) + ibmvmc_send_capabilities(adapter); + else + dev_err(adapter->dev, " Unable to send init rsp\n"); + } else { + dev_err(adapter->dev, "Invalid state 0x%x mtu = 0x%x\n", + ibmvmc.state, ibmvmc.max_mtu); + } + + break; + case 0x02: /* Initialization response */ + dev_dbg(adapter->dev, "CRQ recv: initialization resp msg - state 0x%x\n", + ibmvmc.state); + if (ibmvmc.state == ibmvmc_state_crqinit) + ibmvmc_send_capabilities(adapter); + break; + default: + dev_warn(adapter->dev, "Unknown crq message type 0x%lx\n", + (unsigned long)crq->type); + } +} + +/** + * ibmvmc_handle_crq - Handle CRQ + * + * @crq: ibmvmc_crq_msg struct + * @adapter: crq_server_adapter struct + * + * Read the command elements from the command queue and execute the + * requests based upon the type of crq message. + * + */ +static void ibmvmc_handle_crq(struct ibmvmc_crq_msg *crq, + struct crq_server_adapter *adapter) +{ + switch (crq->valid) { + case 0xC0: /* initialization */ + ibmvmc_handle_crq_init(crq, adapter); + break; + case 0xFF: /* Hypervisor telling us the connection is closed */ + dev_warn(adapter->dev, "CRQ recv: virtual adapter failed - resetting.\n"); + ibmvmc_reset(adapter, true); + break; + case 0x80: /* real payload */ + ibmvmc_crq_process(adapter, crq); + break; + default: + dev_warn(adapter->dev, "CRQ recv: unknown msg 0x%02x.\n", + crq->valid); + break; + } +} + +static void ibmvmc_task(unsigned long data) +{ + struct crq_server_adapter *adapter = + (struct crq_server_adapter *)data; + struct vio_dev *vdev = to_vio_dev(adapter->dev); + struct ibmvmc_crq_msg *crq; + int done = 0; + + while (!done) { + /* Pull all the valid messages off the CRQ */ + while ((crq = crq_queue_next_crq(&adapter->queue)) != NULL) { + ibmvmc_handle_crq(crq, adapter); + crq->valid = 0x00; + /* CRQ reset was requested, stop processing CRQs. + * Interrupts will be re-enabled by the reset task. + */ + if (ibmvmc.state == ibmvmc_state_sched_reset) + return; + } + + vio_enable_interrupts(vdev); + crq = crq_queue_next_crq(&adapter->queue); + if (crq) { + vio_disable_interrupts(vdev); + ibmvmc_handle_crq(crq, adapter); + crq->valid = 0x00; + /* CRQ reset was requested, stop processing CRQs. + * Interrupts will be re-enabled by the reset task. 
+ */ + if (ibmvmc.state == ibmvmc_state_sched_reset) + return; + } else { + done = 1; + } + } +} + +/** + * ibmvmc_init_crq_queue - Init CRQ Queue + * + * @adapter: crq_server_adapter struct + * + * Return: + * 0 - Success + * Non-zero - Failure + */ +static int ibmvmc_init_crq_queue(struct crq_server_adapter *adapter) +{ + struct vio_dev *vdev = to_vio_dev(adapter->dev); + struct crq_queue *queue = &adapter->queue; + int rc = 0; + int retrc = 0; + + queue->msgs = (struct ibmvmc_crq_msg *)get_zeroed_page(GFP_KERNEL); + + if (!queue->msgs) + goto malloc_failed; + + queue->size = PAGE_SIZE / sizeof(*queue->msgs); + + queue->msg_token = dma_map_single(adapter->dev, queue->msgs, + queue->size * sizeof(*queue->msgs), + DMA_BIDIRECTIONAL); + + if (dma_mapping_error(adapter->dev, queue->msg_token)) + goto map_failed; + + retrc = plpar_hcall_norets(H_REG_CRQ, + vdev->unit_address, + queue->msg_token, PAGE_SIZE); + retrc = rc; + + if (rc == H_RESOURCE) + rc = ibmvmc_reset_crq_queue(adapter); + + if (rc == 2) { + dev_warn(adapter->dev, "Partner adapter not ready\n"); + retrc = 0; + } else if (rc != 0) { + dev_err(adapter->dev, "Error %d opening adapter\n", rc); + goto reg_crq_failed; + } + + queue->cur = 0; + spin_lock_init(&queue->lock); + + tasklet_init(&adapter->work_task, ibmvmc_task, (unsigned long)adapter); + + if (request_irq(vdev->irq, + ibmvmc_handle_event, + 0, "ibmvmc", (void *)adapter) != 0) { + dev_err(adapter->dev, "couldn't register irq 0x%x\n", + vdev->irq); + goto req_irq_failed; + } + + rc = vio_enable_interrupts(vdev); + if (rc != 0) { + dev_err(adapter->dev, "Error %d enabling interrupts!!!\n", rc); + goto req_irq_failed; + } + + return retrc; + +req_irq_failed: + /* Cannot have any work since we either never got our IRQ registered, + * or never got interrupts enabled + */ + tasklet_kill(&adapter->work_task); + h_free_crq(vdev->unit_address); +reg_crq_failed: + dma_unmap_single(adapter->dev, + queue->msg_token, + queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL); +map_failed: + free_page((unsigned long)queue->msgs); +malloc_failed: + return -ENOMEM; +} + +/* Fill in the liobn and riobn fields on the adapter */ +static int read_dma_window(struct vio_dev *vdev, + struct crq_server_adapter *adapter) +{ + const __be32 *dma_window; + const __be32 *prop; + + /* TODO Using of_parse_dma_window would be better, but it doesn't give + * a way to read multiple windows without already knowing the size of + * a window or the number of windows + */ + dma_window = + (const __be32 *)vio_get_attribute(vdev, "ibm,my-dma-window", + NULL); + if (!dma_window) { + dev_warn(adapter->dev, "Couldn't find ibm,my-dma-window property\n"); + return -1; + } + + adapter->liobn = be32_to_cpu(*dma_window); + dma_window++; + + prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells", + NULL); + if (!prop) { + dev_warn(adapter->dev, "Couldn't find ibm,#dma-address-cells property\n"); + dma_window++; + } else { + dma_window += be32_to_cpu(*prop); + } + + prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells", + NULL); + if (!prop) { + dev_warn(adapter->dev, "Couldn't find ibm,#dma-size-cells property\n"); + dma_window++; + } else { + dma_window += be32_to_cpu(*prop); + } + + /* dma_window should point to the second window now */ + adapter->riobn = be32_to_cpu(*dma_window); + + return 0; +} + +static int ibmvmc_probe(struct vio_dev *vdev, const struct vio_device_id *id) +{ + struct crq_server_adapter *adapter = &ibmvmc_adapter; + int rc; + + dev_set_drvdata(&vdev->dev, NULL); + 
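read_dma_window() above picks the local and remote LIOBNs out of the ibm,my-dma-window property: the first cell of each window is the LIOBN, followed by #dma-address-cells address cells and #dma-size-cells size cells. Assuming two cells for each (a common layout, shown here only as an illustration with placeholder values), the property would look like:

    /*
     * ibm,my-dma-window = <
     *         0x10000000  0x0 0x0  0x0 0x20000000      // liobn, address, size
     *         0x10000001  0x0 0x0  0x0 0x20000000      // riobn, address, size
     * >;
     * with #dma-address-cells = <2> and #dma-size-cells = <2>, the parser
     * skips 2 + 2 cells after the first LIOBN to land on the remote one.
     */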
memset(adapter, 0, sizeof(*adapter)); + adapter->dev = &vdev->dev; + + dev_info(adapter->dev, "Probe for UA 0x%x\n", vdev->unit_address); + + rc = read_dma_window(vdev, adapter); + if (rc != 0) { + ibmvmc.state = ibmvmc_state_failed; + return -1; + } + + dev_dbg(adapter->dev, "Probe: liobn 0x%x, riobn 0x%x\n", + adapter->liobn, adapter->riobn); + + init_waitqueue_head(&adapter->reset_wait_queue); + adapter->reset_task = kthread_run(ibmvmc_reset_task, adapter, "ibmvmc"); + if (IS_ERR(adapter->reset_task)) { + dev_err(adapter->dev, "Failed to start reset thread\n"); + ibmvmc.state = ibmvmc_state_failed; + rc = PTR_ERR(adapter->reset_task); + adapter->reset_task = NULL; + return rc; + } + + rc = ibmvmc_init_crq_queue(adapter); + if (rc != 0 && rc != H_RESOURCE) { + dev_err(adapter->dev, "Error initializing CRQ. rc = 0x%x\n", + rc); + ibmvmc.state = ibmvmc_state_failed; + goto crq_failed; + } + + ibmvmc.state = ibmvmc_state_crqinit; + + /* Try to send an initialization message. Note that this is allowed + * to fail if the other end is not acive. In that case we just wait + * for the other side to initialize. + */ + if (ibmvmc_send_crq(adapter, 0xC001000000000000LL, 0) != 0 && + rc != H_RESOURCE) + dev_warn(adapter->dev, "Failed to send initialize CRQ message\n"); + + dev_set_drvdata(&vdev->dev, adapter); + + return 0; + +crq_failed: + kthread_stop(adapter->reset_task); + adapter->reset_task = NULL; + return -EPERM; +} + +static int ibmvmc_remove(struct vio_dev *vdev) +{ + struct crq_server_adapter *adapter = dev_get_drvdata(&vdev->dev); + + dev_info(adapter->dev, "Entering remove for UA 0x%x\n", + vdev->unit_address); + ibmvmc_release_crq_queue(adapter); + + return 0; +} + +static struct vio_device_id ibmvmc_device_table[] = { + { "ibm,vmc", "IBM,vmc" }, + { "", "" } +}; +MODULE_DEVICE_TABLE(vio, ibmvmc_device_table); + +static struct vio_driver ibmvmc_driver = { + .name = ibmvmc_driver_name, + .id_table = ibmvmc_device_table, + .probe = ibmvmc_probe, + .remove = ibmvmc_remove, +}; + +static void __init ibmvmc_scrub_module_parms(void) +{ + if (ibmvmc_max_mtu > MAX_MTU) { + pr_warn("ibmvmc: Max MTU reduced to %d\n", MAX_MTU); + ibmvmc_max_mtu = MAX_MTU; + } else if (ibmvmc_max_mtu < MIN_MTU) { + pr_warn("ibmvmc: Max MTU increased to %d\n", MIN_MTU); + ibmvmc_max_mtu = MIN_MTU; + } + + if (ibmvmc_max_buf_pool_size > MAX_BUF_POOL_SIZE) { + pr_warn("ibmvmc: Max buffer pool size reduced to %d\n", + MAX_BUF_POOL_SIZE); + ibmvmc_max_buf_pool_size = MAX_BUF_POOL_SIZE; + } else if (ibmvmc_max_buf_pool_size < MIN_BUF_POOL_SIZE) { + pr_warn("ibmvmc: Max buffer pool size increased to %d\n", + MIN_BUF_POOL_SIZE); + ibmvmc_max_buf_pool_size = MIN_BUF_POOL_SIZE; + } + + if (ibmvmc_max_hmcs > MAX_HMCS) { + pr_warn("ibmvmc: Max HMCs reduced to %d\n", MAX_HMCS); + ibmvmc_max_hmcs = MAX_HMCS; + } else if (ibmvmc_max_hmcs < MIN_HMCS) { + pr_warn("ibmvmc: Max HMCs increased to %d\n", MIN_HMCS); + ibmvmc_max_hmcs = MIN_HMCS; + } +} + +static struct miscdevice ibmvmc_miscdev = { + .name = ibmvmc_driver_name, + .minor = MISC_DYNAMIC_MINOR, + .fops = &ibmvmc_fops, +}; + +static int __init ibmvmc_module_init(void) +{ + int rc, i, j; + + ibmvmc.state = ibmvmc_state_initial; + pr_info("ibmvmc: version %s\n", IBMVMC_DRIVER_VERSION); + + rc = misc_register(&ibmvmc_miscdev); + if (rc) { + pr_err("ibmvmc: misc registration failed\n"); + goto misc_register_failed; + } + pr_info("ibmvmc: node %d:%d\n", MISC_MAJOR, + ibmvmc_miscdev.minor); + + /* Initialize data structures */ + memset(hmcs, 0, sizeof(struct ibmvmc_hmc) * 
MAX_HMCS); + for (i = 0; i < MAX_HMCS; i++) { + spin_lock_init(&hmcs[i].lock); + hmcs[i].state = ibmhmc_state_free; + for (j = 0; j < MAX_BUF_POOL_SIZE; j++) + hmcs[i].queue_outbound_msgs[j] = VMC_INVALID_BUFFER_ID; + } + + /* Sanity check module parms */ + ibmvmc_scrub_module_parms(); + + /* + * Initialize some reasonable values. Might be negotiated smaller + * values during the capabilities exchange. + */ + ibmvmc.max_mtu = ibmvmc_max_mtu; + ibmvmc.max_buffer_pool_size = ibmvmc_max_buf_pool_size; + ibmvmc.max_hmc_index = ibmvmc_max_hmcs - 1; + + rc = vio_register_driver(&ibmvmc_driver); + + if (rc) { + pr_err("ibmvmc: rc %d from vio_register_driver\n", rc); + goto vio_reg_failed; + } + + return 0; + +vio_reg_failed: + misc_deregister(&ibmvmc_miscdev); +misc_register_failed: + return rc; +} + +static void __exit ibmvmc_module_exit(void) +{ + pr_info("ibmvmc: module exit\n"); + vio_unregister_driver(&ibmvmc_driver); + misc_deregister(&ibmvmc_miscdev); +} + +module_init(ibmvmc_module_init); +module_exit(ibmvmc_module_exit); + +module_param_named(buf_pool_size, ibmvmc_max_buf_pool_size, + int, 0644); +MODULE_PARM_DESC(buf_pool_size, "Buffer pool size"); +module_param_named(max_hmcs, ibmvmc_max_hmcs, int, 0644); +MODULE_PARM_DESC(max_hmcs, "Max HMCs"); +module_param_named(max_mtu, ibmvmc_max_mtu, int, 0644); +MODULE_PARM_DESC(max_mtu, "Max MTU"); + +MODULE_AUTHOR("Steven Royer <seroyer@linux.vnet.ibm.com>"); +MODULE_DESCRIPTION("IBM VMC"); +MODULE_VERSION(IBMVMC_DRIVER_VERSION); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/ibmvmc.h b/drivers/misc/ibmvmc.h new file mode 100644 index 000000000000..e140ada8fe2c --- /dev/null +++ b/drivers/misc/ibmvmc.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0+ + * + * linux/drivers/misc/ibmvmc.h + * + * IBM Power Systems Virtual Management Channel Support. + * + * Copyright (c) 2004, 2018 IBM Corp. + * Dave Engebretsen engebret@us.ibm.com + * Steven Royer seroyer@linux.vnet.ibm.com + * Adam Reznechek adreznec@linux.vnet.ibm.com + * Bryant G. 
Ly <bryantly@linux.vnet.ibm.com> + */ +#ifndef IBMVMC_H +#define IBMVMC_H + +#include <linux/types.h> +#include <linux/cdev.h> + +#include <asm/vio.h> + +#define IBMVMC_PROTOCOL_VERSION 0x0101 + +#define MIN_BUF_POOL_SIZE 16 +#define MIN_HMCS 1 +#define MIN_MTU 4096 +#define MAX_BUF_POOL_SIZE 64 +#define MAX_HMCS 2 +#define MAX_MTU (4 * 4096) +#define DEFAULT_BUF_POOL_SIZE 32 +#define DEFAULT_HMCS 1 +#define DEFAULT_MTU 4096 +#define HMC_ID_LEN 32 + +#define VMC_INVALID_BUFFER_ID 0xFFFF + +/* ioctl numbers */ +#define VMC_BASE 0xCC +#define VMC_IOCTL_SETHMCID _IOW(VMC_BASE, 0x00, unsigned char *) +#define VMC_IOCTL_QUERY _IOR(VMC_BASE, 0x01, struct ibmvmc_query_struct) +#define VMC_IOCTL_REQUESTVMC _IOR(VMC_BASE, 0x02, u32) + +#define VMC_MSG_CAP 0x01 +#define VMC_MSG_CAP_RESP 0x81 +#define VMC_MSG_OPEN 0x02 +#define VMC_MSG_OPEN_RESP 0x82 +#define VMC_MSG_CLOSE 0x03 +#define VMC_MSG_CLOSE_RESP 0x83 +#define VMC_MSG_ADD_BUF 0x04 +#define VMC_MSG_ADD_BUF_RESP 0x84 +#define VMC_MSG_REM_BUF 0x05 +#define VMC_MSG_REM_BUF_RESP 0x85 +#define VMC_MSG_SIGNAL 0x06 + +#define VMC_MSG_SUCCESS 0 +#define VMC_MSG_INVALID_HMC_INDEX 1 +#define VMC_MSG_INVALID_BUFFER_ID 2 +#define VMC_MSG_CLOSED_HMC 3 +#define VMC_MSG_INTERFACE_FAILURE 4 +#define VMC_MSG_NO_BUFFER 5 + +#define VMC_BUF_OWNER_ALPHA 0 +#define VMC_BUF_OWNER_HV 1 + +enum ibmvmc_states { + ibmvmc_state_sched_reset = -1, + ibmvmc_state_initial = 0, + ibmvmc_state_crqinit = 1, + ibmvmc_state_capabilities = 2, + ibmvmc_state_ready = 3, + ibmvmc_state_failed = 4, +}; + +enum ibmhmc_states { + /* HMC connection not established */ + ibmhmc_state_free = 0, + + /* HMC connection established (open called) */ + ibmhmc_state_initial = 1, + + /* open msg sent to HV, due to ioctl(1) call */ + ibmhmc_state_opening = 2, + + /* HMC connection ready, open resp msg from HV */ + ibmhmc_state_ready = 3, + + /* HMC connection failure */ + ibmhmc_state_failed = 4, +}; + +struct ibmvmc_buffer { + u8 valid; /* 1 when DMA storage allocated to buffer */ + u8 free; /* 1 when buffer available for the Alpha Partition */ + u8 owner; + u16 id; + u32 size; + u32 msg_len; + dma_addr_t dma_addr_local; + dma_addr_t dma_addr_remote; + void *real_addr_local; +}; + +struct ibmvmc_admin_crq_msg { + u8 valid; /* RPA Defined */ + u8 type; /* ibmvmc msg type */ + u8 status; /* Response msg status. Zero is success and on failure, + * either 1 - General Failure, or 2 - Invalid Version is + * returned. + */ + u8 rsvd[2]; + u8 max_hmc; /* Max # of independent HMC connections supported */ + __be16 pool_size; /* Maximum number of buffers supported per HMC + * connection + */ + __be32 max_mtu; /* Maximum message size supported (bytes) */ + __be16 crq_size; /* # of entries available in the CRQ for the + * source partition. The target partition must + * limit the number of outstanding messages to + * one half or less. + */ + __be16 version; /* Indicates the code level of the management partition + * or the hypervisor with the high-order byte + * indicating a major version and the low-order byte + * indicating a minor version. 
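IBMVMC_PROTOCOL_VERSION packs a major and a minor level into one 16-bit value, and ibmvmc_process_capabilities() earlier in the patch compares only the major byte while clamping every advertised limit to the smaller of the two offers. A worked example with the values defined above and hypothetical peer numbers:

    /* version check: only the major byte must match                          */
    /*   local 0x0101 -> major 0x01, minor 0x01                               */
    /* limit negotiation (peer offers are made-up numbers):                   */
    /*   max_mtu              = min(local 16384, peer 65536)     = 16384      */
    /*   max_buffer_pool_size = min(local 32,    peer 16)        = 16         */
    /*   max_hmc_index        = min(local 2,     peer 2) - 1     = 1          */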
+ */ +}; + +struct ibmvmc_crq_msg { + u8 valid; /* RPA Defined */ + u8 type; /* ibmvmc msg type */ + u8 status; /* Response msg status */ + union { + u8 rsvd; /* Reserved */ + u8 owner; + } var1; + u8 hmc_session; /* Session Identifier for the current VMC connection */ + u8 hmc_index; /* A unique HMC Idx would be used if multiple management + * applications running concurrently were desired + */ + union { + __be16 rsvd; + __be16 buffer_id; + } var2; + __be32 rsvd; + union { + __be32 rsvd; + __be32 lioba; + __be32 msg_len; + } var3; +}; + +/* an RPA command/response transport queue */ +struct crq_queue { + struct ibmvmc_crq_msg *msgs; + int size, cur; + dma_addr_t msg_token; + spinlock_t lock; +}; + +/* VMC server adapter settings */ +struct crq_server_adapter { + struct device *dev; + struct crq_queue queue; + u32 liobn; + u32 riobn; + struct tasklet_struct work_task; + wait_queue_head_t reset_wait_queue; + struct task_struct *reset_task; +}; + +/* Driver wide settings */ +struct ibmvmc_struct { + u32 state; + u32 max_mtu; + u32 max_buffer_pool_size; + u32 max_hmc_index; + struct crq_server_adapter *adapter; + struct cdev cdev; + u32 vmc_drc_index; +}; + +struct ibmvmc_file_session; + +/* Connection specific settings */ +struct ibmvmc_hmc { + u8 session; + u8 index; + u32 state; + struct crq_server_adapter *adapter; + spinlock_t lock; + unsigned char hmc_id[HMC_ID_LEN]; + struct ibmvmc_buffer buffer[MAX_BUF_POOL_SIZE]; + unsigned short queue_outbound_msgs[MAX_BUF_POOL_SIZE]; + int queue_head, queue_tail; + struct ibmvmc_file_session *file_session; +}; + +struct ibmvmc_file_session { + struct file *file; + struct ibmvmc_hmc *hmc; + bool valid; +}; + +struct ibmvmc_query_struct { + int have_vmc; + int state; + int vmc_drc_index; +}; + +#endif /* __IBMVMC_H */ diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c index 3641f1334cf0..ab174f28e3be 100644 --- a/drivers/misc/sgi-gru/grumain.c +++ b/drivers/misc/sgi-gru/grumain.c @@ -926,7 +926,7 @@ again: * * Note: gru segments alway mmaped on GRU_GSEG_PAGESIZE boundaries. */ -int gru_fault(struct vm_fault *vmf) +vm_fault_t gru_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct gru_thread_state *gts; diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h index b5e308b50ed1..3e041b6f7a68 100644 --- a/drivers/misc/sgi-gru/grutables.h +++ b/drivers/misc/sgi-gru/grutables.h @@ -147,6 +147,7 @@ #include <linux/mutex.h> #include <linux/wait.h> #include <linux/mmu_notifier.h> +#include <linux/mm_types.h> #include "gru.h" #include "grulib.h" #include "gruhandles.h" @@ -665,7 +666,7 @@ extern unsigned long gru_reserve_cb_resources(struct gru_state *gru, int cbr_au_count, char *cbmap); extern unsigned long gru_reserve_ds_resources(struct gru_state *gru, int dsr_au_count, char *dsmap); -extern int gru_fault(struct vm_fault *vmf); +extern vm_fault_t gru_fault(struct vm_fault *vmf); extern struct gru_mm_struct *gru_register_mmu_notifier(void); extern void gru_drop_mmu_notifier(struct gru_mm_struct *gms); diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c index 0c26eaf5f62b..216d5c756236 100644 --- a/drivers/misc/sgi-xp/xpnet.c +++ b/drivers/misc/sgi-xp/xpnet.c @@ -407,7 +407,7 @@ xpnet_send(struct sk_buff *skb, struct xpnet_pending_msg *queued_msg, * destination partid. If the destination partid octets are 0xffff, * this packet is to be broadcast to all connected partitions. 
*/ -static int +static netdev_tx_t xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct xpnet_pending_msg *queued_msg; diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c index b77aacafc3fc..5ec3f5a43718 100644 --- a/drivers/misc/ti-st/st_kim.c +++ b/drivers/misc/ti-st/st_kim.c @@ -735,7 +735,7 @@ static int kim_probe(struct platform_device *pdev) st_kim_devices[0] = pdev; } - kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC); + kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_KERNEL); if (!kim_gdata) { pr_err("no mem to allocate"); return -ENOMEM; diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c index e5f108713dd8..9ac95b48ef92 100644 --- a/drivers/misc/tifm_7xx1.c +++ b/drivers/misc/tifm_7xx1.c @@ -239,9 +239,13 @@ static int tifm_7xx1_resume(struct pci_dev *dev) unsigned long timeout; unsigned int good_sockets = 0, bad_sockets = 0; unsigned long flags; - unsigned char new_ids[fm->num_sockets]; + /* Maximum number of entries is 4 */ + unsigned char new_ids[4]; DECLARE_COMPLETION_ONSTACK(finish_resume); + if (WARN_ON(fm->num_sockets > ARRAY_SIZE(new_ids))) + return -ENXIO; + pci_set_power_state(dev, PCI_D0); pci_restore_state(dev); rc = pci_enable_device(dev); diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c index 9047c0a529b2..efd733472a35 100644 --- a/drivers/misc/vmw_balloon.c +++ b/drivers/misc/vmw_balloon.c @@ -576,15 +576,9 @@ static void vmballoon_pop(struct vmballoon *b) } } - if (b->batch_page) { - vunmap(b->batch_page); - b->batch_page = NULL; - } - - if (b->page) { - __free_page(b->page); - b->page = NULL; - } + /* Clearing the batch_page unconditionally has no adverse effect */ + free_page((unsigned long)b->batch_page); + b->batch_page = NULL; } /* @@ -991,16 +985,13 @@ static const struct vmballoon_ops vmballoon_batched_ops = { static bool vmballoon_init_batching(struct vmballoon *b) { - b->page = alloc_page(VMW_PAGE_ALLOC_NOSLEEP); - if (!b->page) - return false; + struct page *page; - b->batch_page = vmap(&b->page, 1, VM_MAP, PAGE_KERNEL); - if (!b->batch_page) { - __free_page(b->page); + page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!page) return false; - } + b->batch_page = page_address(page); return true; } diff --git a/drivers/mux/adg792a.c b/drivers/mux/adg792a.c index 6a8725cf3d71..e8fc2fc1ab09 100644 --- a/drivers/mux/adg792a.c +++ b/drivers/mux/adg792a.c @@ -58,8 +58,7 @@ static const struct mux_control_ops adg792a_ops = { .set = adg792a_set, }; -static int adg792a_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int adg792a_probe(struct i2c_client *i2c) { struct device *dev = &i2c->dev; struct mux_chip *mux_chip; @@ -144,7 +143,7 @@ static struct i2c_driver adg792a_driver = { .name = "adg792a", .of_match_table = of_match_ptr(adg792a_of_match), }, - .probe = adg792a_probe, + .probe_new = adg792a_probe, .id_table = adg792a_id, }; module_i2c_driver(adg792a_driver); diff --git a/drivers/nubus/bus.c b/drivers/nubus/bus.c index d306c348c857..a59b6c4bb5b8 100644 --- a/drivers/nubus/bus.c +++ b/drivers/nubus/bus.c @@ -63,20 +63,15 @@ static struct device nubus_parent = { .init_name = "nubus", }; -int __init nubus_bus_register(void) +static int __init nubus_bus_register(void) { - int err; - - err = device_register(&nubus_parent); - if (err) - return err; - - err = bus_register(&nubus_bus_type); - if (!err) - return 0; + return bus_register(&nubus_bus_type); +} +postcore_initcall(nubus_bus_register); - device_unregister(&nubus_parent); - return err; +int 
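
The tifm_7xx1 hunk above replaces a variable-length array on the stack with a fixed-size array plus an explicit bound check, the usual pattern for removing VLAs. A minimal stand-alone illustration of that pattern (the array size and the test values below are arbitrary, not taken from the driver):

        #include <stdio.h>

        #define MAX_SOCKETS 4   /* fixed upper bound replacing the old VLA length */

        static int resume_sockets(unsigned int num_sockets)
        {
                unsigned char new_ids[MAX_SOCKETS];
                unsigned int i;

                /* Reject impossible input instead of sizing the array at run time */
                if (num_sockets > sizeof(new_ids))
                        return -1;

                for (i = 0; i < num_sockets; i++)
                        new_ids[i] = (unsigned char)i;  /* stand-in for reading a socket id */

                return 0;
        }

        int main(void)
        {
                printf("%d\n", resume_sockets(3));      /* 0: within the fixed bound */
                printf("%d\n", resume_sockets(9));      /* -1: rejected */
                return 0;
        }
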
__init nubus_parent_device_register(void) +{ + return device_register(&nubus_parent); } static void nubus_device_release(struct device *dev) diff --git a/drivers/nubus/nubus.c b/drivers/nubus/nubus.c index 4621ff98138c..bb0d63a44f41 100644 --- a/drivers/nubus/nubus.c +++ b/drivers/nubus/nubus.c @@ -875,7 +875,7 @@ static int __init nubus_init(void) return 0; nubus_proc_init(); - err = nubus_bus_register(); + err = nubus_parent_device_register(); if (err) return err; nubus_scan_bus(); diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig index 1090924efdb1..54a3c298247b 100644 --- a/drivers/nvmem/Kconfig +++ b/drivers/nvmem/Kconfig @@ -175,4 +175,10 @@ config NVMEM_SNVS_LPGPR This driver can also be built as a module. If so, the module will be called nvmem-snvs-lpgpr. +config RAVE_SP_EEPROM + tristate "Rave SP EEPROM Support" + depends on RAVE_SP_CORE + help + Say y here to enable Rave SP EEPROM support. + endif diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile index e54dcfa6565a..27e96a8efd1c 100644 --- a/drivers/nvmem/Makefile +++ b/drivers/nvmem/Makefile @@ -37,3 +37,6 @@ obj-$(CONFIG_MESON_MX_EFUSE) += nvmem_meson_mx_efuse.o nvmem_meson_mx_efuse-y := meson-mx-efuse.o obj-$(CONFIG_NVMEM_SNVS_LPGPR) += nvmem_snvs_lpgpr.o nvmem_snvs_lpgpr-y := snvs_lpgpr.o +obj-$(CONFIG_RAVE_SP_EEPROM) += nvmem-rave-sp-eeprom.o +nvmem-rave-sp-eeprom-y := rave-sp-eeprom.o + diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index b05aa8e81303..b5b0cdc21d01 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -353,18 +353,27 @@ static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem, return 0; } -static int nvmem_add_cells(struct nvmem_device *nvmem, - const struct nvmem_config *cfg) +/** + * nvmem_add_cells() - Add cell information to an nvmem device + * + * @nvmem: nvmem device to add cells to. + * @info: nvmem cell info to add to the device + * @ncells: number of cells in info + * + * Return: 0 or negative error code on failure. + */ +int nvmem_add_cells(struct nvmem_device *nvmem, + const struct nvmem_cell_info *info, + int ncells) { struct nvmem_cell **cells; - const struct nvmem_cell_info *info = cfg->cells; int i, rval; - cells = kcalloc(cfg->ncells, sizeof(*cells), GFP_KERNEL); + cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL); if (!cells) return -ENOMEM; - for (i = 0; i < cfg->ncells; i++) { + for (i = 0; i < ncells; i++) { cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL); if (!cells[i]) { rval = -ENOMEM; @@ -380,7 +389,7 @@ static int nvmem_add_cells(struct nvmem_device *nvmem, nvmem_cell_add(cells[i]); } - nvmem->ncells = cfg->ncells; + nvmem->ncells = ncells; /* remove tmp array */ kfree(cells); @@ -393,6 +402,7 @@ err: return rval; } +EXPORT_SYMBOL_GPL(nvmem_add_cells); /* * nvmem_setup_compat() - Create an additional binary entry in @@ -509,7 +519,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) } if (config->cells) - nvmem_add_cells(nvmem, config); + nvmem_add_cells(nvmem, config->cells, config->ncells); return nvmem; @@ -559,6 +569,7 @@ static void devm_nvmem_release(struct device *dev, void *res) * nvmem_config. * Also creates an binary entry in /sys/bus/nvmem/devices/dev-name/nvmem * + * @dev: Device that uses the nvmem device. * @config: nvmem device configuration with which nvmem device is created. 
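
With nvmem_add_cells() exported above, a provider driver can register a cell layout directly instead of passing it through nvmem_config. The fragment below is a hedged sketch only: it assumes the declaration lands in nvmem-provider.h alongside the export, `nvmem` is a device already obtained from nvmem_register(), and the "mac-address" cell name, offset and size are made-up example values.

        #include <linux/kernel.h>
        #include <linux/nvmem-consumer.h>
        #include <linux/nvmem-provider.h>

        static const struct nvmem_cell_info example_cells[] = {
                {
                        .name   = "mac-address",        /* example cell */
                        .offset = 0x40,
                        .bytes  = 6,
                },
        };

        static int example_register_cells(struct nvmem_device *nvmem)
        {
                /* New signature: device, cell array, number of cells */
                return nvmem_add_cells(nvmem, example_cells,
                                       ARRAY_SIZE(example_cells));
        }
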
* * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device @@ -597,6 +608,7 @@ static int devm_nvmem_match(struct device *dev, void *res, void *data) * devm_nvmem_unregister() - Unregister previously registered managed nvmem * device. * + * @dev: Device that uses the nvmem device. * @nvmem: Pointer to previously registered nvmem device. * * Return: Will be an negative on error or a zero on success. @@ -1107,6 +1119,8 @@ static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell, /* setup the first byte with lsb bits from nvmem */ rc = nvmem_reg_read(nvmem, cell->offset, &v, 1); + if (rc) + goto err; *b++ |= GENMASK(bit_offset - 1, 0) & v; /* setup rest of the byte if any */ @@ -1125,11 +1139,16 @@ static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell, /* setup the last byte with msb bits from nvmem */ rc = nvmem_reg_read(nvmem, cell->offset + cell->bytes - 1, &v, 1); + if (rc) + goto err; *p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v; } return buf; +err: + kfree(buf); + return ERR_PTR(rc); } /** diff --git a/drivers/nvmem/meson-efuse.c b/drivers/nvmem/meson-efuse.c index 71823d1403c5..d769840d1e18 100644 --- a/drivers/nvmem/meson-efuse.c +++ b/drivers/nvmem/meson-efuse.c @@ -24,23 +24,16 @@ static int meson_efuse_read(void *context, unsigned int offset, void *val, size_t bytes) { - u8 *buf = val; - int ret; - - ret = meson_sm_call_read(buf, bytes, SM_EFUSE_READ, offset, - bytes, 0, 0, 0); - if (ret < 0) - return ret; - - return 0; + return meson_sm_call_read((u8 *)val, bytes, SM_EFUSE_READ, offset, + bytes, 0, 0, 0); } -static struct nvmem_config econfig = { - .name = "meson-efuse", - .stride = 1, - .word_size = 1, - .read_only = true, -}; +static int meson_efuse_write(void *context, unsigned int offset, + void *val, size_t bytes) +{ + return meson_sm_call_write((u8 *)val, bytes, SM_EFUSE_WRITE, offset, + bytes, 0, 0, 0); +} static const struct of_device_id meson_efuse_match[] = { { .compatible = "amlogic,meson-gxbb-efuse", }, @@ -50,17 +43,27 @@ MODULE_DEVICE_TABLE(of, meson_efuse_match); static int meson_efuse_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; struct nvmem_device *nvmem; + struct nvmem_config *econfig; unsigned int size; if (meson_sm_call(SM_EFUSE_USER_MAX, &size, 0, 0, 0, 0, 0) < 0) return -EINVAL; - econfig.dev = &pdev->dev; - econfig.reg_read = meson_efuse_read; - econfig.size = size; + econfig = devm_kzalloc(dev, sizeof(*econfig), GFP_KERNEL); + if (!econfig) + return -ENOMEM; + + econfig->dev = dev; + econfig->name = dev_name(dev); + econfig->stride = 1; + econfig->word_size = 1; + econfig->reg_read = meson_efuse_read; + econfig->reg_write = meson_efuse_write; + econfig->size = size; - nvmem = devm_nvmem_register(&pdev->dev, &econfig); + nvmem = devm_nvmem_register(&pdev->dev, econfig); return PTR_ERR_OR_ZERO(nvmem); } diff --git a/drivers/nvmem/rave-sp-eeprom.c b/drivers/nvmem/rave-sp-eeprom.c new file mode 100644 index 000000000000..50aeea6ec6cc --- /dev/null +++ b/drivers/nvmem/rave-sp-eeprom.c @@ -0,0 +1,357 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/* + * EEPROM driver for RAVE SP + * + * Copyright (C) 2018 Zodiac Inflight Innovations + * + */ +#include <linux/kernel.h> +#include <linux/mfd/rave-sp.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/sizes.h> + +/** + * enum rave_sp_eeprom_access_type - Supported types of EEPROM access + * + * @RAVE_SP_EEPROM_WRITE: EEPROM write + * 
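
The nvmem_cell_prepare_write_buffer() hunk above merges the low bit_offset bits of the byte already stored in the nvmem into the first byte of the write buffer, whose low bits are zero because the new value was shifted left by bit_offset. A stand-alone illustration of that merge, with GENMASK() redefined locally and bit_offset = 3 as an arbitrary example:

        #include <stdio.h>
        #include <stdint.h>

        #define GENMASK(h, l)  (((~0u) << (l)) & (~0u >> (31 - (h))))

        int main(void)
        {
                unsigned int bit_offset = 3;    /* cell starts at bit 3 of the byte */
                uint8_t v = 0xA5;               /* byte currently stored in the nvmem */
                uint8_t b = 0xF8;               /* new value, already shifted left by bit_offset */

                b |= GENMASK(bit_offset - 1, 0) & v;    /* keep bits 2..0 from the nvmem */
                printf("merged byte: 0x%02x\n", b);     /* 0xfd */
                return 0;
        }
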
@RAVE_SP_EEPROM_READ: EEPROM read + */ +enum rave_sp_eeprom_access_type { + RAVE_SP_EEPROM_WRITE = 0, + RAVE_SP_EEPROM_READ = 1, +}; + +/** + * enum rave_sp_eeprom_header_size - EEPROM command header sizes + * + * @RAVE_SP_EEPROM_HEADER_SMALL: EEPROM header size for "small" devices (< 8K) + * @RAVE_SP_EEPROM_HEADER_BIG: EEPROM header size for "big" devices (> 8K) + */ +enum rave_sp_eeprom_header_size { + RAVE_SP_EEPROM_HEADER_SMALL = 4U, + RAVE_SP_EEPROM_HEADER_BIG = 5U, +}; + +#define RAVE_SP_EEPROM_PAGE_SIZE 32U + +/** + * struct rave_sp_eeprom_page - RAVE SP EEPROM page + * + * @type: Access type (see enum rave_sp_eeprom_access_type) + * @success: Success flag (Success = 1, Failure = 0) + * @data: Read data + + * Note this structure corresponds to RSP_*_EEPROM payload from RAVE + * SP ICD + */ +struct rave_sp_eeprom_page { + u8 type; + u8 success; + u8 data[RAVE_SP_EEPROM_PAGE_SIZE]; +} __packed; + +/** + * struct rave_sp_eeprom - RAVE SP EEPROM device + * + * @sp: Pointer to parent RAVE SP device + * @mutex: Lock protecting access to EEPROM + * @address: EEPROM device address + * @header_size: Size of EEPROM command header for this device + * @dev: Pointer to corresponding struct device used for logging + */ +struct rave_sp_eeprom { + struct rave_sp *sp; + struct mutex mutex; + u8 address; + unsigned int header_size; + struct device *dev; +}; + +/** + * rave_sp_eeprom_io - Low-level part of EEPROM page access + * + * @eeprom: EEPROM device to write to + * @type: EEPROM access type (read or write) + * @idx: number of the EEPROM page + * @page: Data to write or buffer to store result (via page->data) + * + * This function does all of the low-level work required to perform a + * EEPROM access. This includes formatting correct command payload, + * sending it and checking received results. + * + * Returns zero in case of success or negative error code in + * case of failure. + */ +static int rave_sp_eeprom_io(struct rave_sp_eeprom *eeprom, + enum rave_sp_eeprom_access_type type, + u16 idx, + struct rave_sp_eeprom_page *page) +{ + const bool is_write = type == RAVE_SP_EEPROM_WRITE; + const unsigned int data_size = is_write ? sizeof(page->data) : 0; + const unsigned int cmd_size = eeprom->header_size + data_size; + const unsigned int rsp_size = + is_write ? sizeof(*page) - sizeof(page->data) : sizeof(*page); + unsigned int offset = 0; + u8 cmd[cmd_size]; + int ret; + + cmd[offset++] = eeprom->address; + cmd[offset++] = 0; + cmd[offset++] = type; + cmd[offset++] = idx; + + /* + * If there's still room in this command's header it means we + * are talkin to EEPROM that uses 16-bit page numbers and we + * have to specify index's MSB in payload as well. + */ + if (offset < eeprom->header_size) + cmd[offset++] = idx >> 8; + /* + * Copy our data to write to command buffer first. In case of + * a read data_size should be zero and memcpy would become a + * no-op + */ + memcpy(&cmd[offset], page->data, data_size); + + ret = rave_sp_exec(eeprom->sp, cmd, cmd_size, page, rsp_size); + if (ret) + return ret; + + if (page->type != type) + return -EPROTO; + + if (!page->success) + return -EIO; + + return 0; +} + +/** + * rave_sp_eeprom_page_access - Access single EEPROM page + * + * @eeprom: EEPROM device to access + * @type: Access type to perform (read or write) + * @offset: Offset within EEPROM to access + * @data: Data buffer + * @data_len: Size of the data buffer + * + * This function performs a generic access to a single page or a + * portion thereof. 
Requested access MUST NOT cross the EEPROM page + * boundary. + * + * Returns zero in case of success or negative error code in + * case of failure. + */ +static int +rave_sp_eeprom_page_access(struct rave_sp_eeprom *eeprom, + enum rave_sp_eeprom_access_type type, + unsigned int offset, u8 *data, + size_t data_len) +{ + const unsigned int page_offset = offset % RAVE_SP_EEPROM_PAGE_SIZE; + const unsigned int page_nr = offset / RAVE_SP_EEPROM_PAGE_SIZE; + struct rave_sp_eeprom_page page; + int ret; + + /* + * This function will not work if data access we've been asked + * to do is crossing EEPROM page boundary. Normally this + * should never happen and getting here would indicate a bug + * in the code. + */ + if (WARN_ON(data_len > sizeof(page.data) - page_offset)) + return -EINVAL; + + if (type == RAVE_SP_EEPROM_WRITE) { + /* + * If doing a partial write we need to do a read first + * to fill the rest of the page with correct data. + */ + if (data_len < RAVE_SP_EEPROM_PAGE_SIZE) { + ret = rave_sp_eeprom_io(eeprom, RAVE_SP_EEPROM_READ, + page_nr, &page); + if (ret) + return ret; + } + + memcpy(&page.data[page_offset], data, data_len); + } + + ret = rave_sp_eeprom_io(eeprom, type, page_nr, &page); + if (ret) + return ret; + + /* + * Since we receive the result of the read via 'page.data' + * buffer we need to copy that to 'data' + */ + if (type == RAVE_SP_EEPROM_READ) + memcpy(data, &page.data[page_offset], data_len); + + return 0; +} + +/** + * rave_sp_eeprom_access - Access EEPROM data + * + * @eeprom: EEPROM device to access + * @type: Access type to perform (read or write) + * @offset: Offset within EEPROM to access + * @data: Data buffer + * @data_len: Size of the data buffer + * + * This function performs a generic access (either read or write) at + * arbitrary offset (not necessary page aligned) of arbitrary length + * (is not constrained by EEPROM page size). + * + * Returns zero in case of success or negative error code in case of + * failure. + */ +static int rave_sp_eeprom_access(struct rave_sp_eeprom *eeprom, + enum rave_sp_eeprom_access_type type, + unsigned int offset, u8 *data, + unsigned int data_len) +{ + unsigned int residue; + unsigned int chunk; + unsigned int head; + int ret; + + mutex_lock(&eeprom->mutex); + + head = offset % RAVE_SP_EEPROM_PAGE_SIZE; + residue = data_len; + + do { + /* + * First iteration, if we are doing an access that is + * not 32-byte aligned, we need to access only data up + * to a page boundary to avoid corssing it in + * rave_sp_eeprom_page_access() + */ + if (unlikely(head)) { + chunk = RAVE_SP_EEPROM_PAGE_SIZE - head; + /* + * This can only happen once per + * rave_sp_eeprom_access() call, so we set + * head to zero to process all the other + * iterations normally. 
+ */ + head = 0; + } else { + chunk = RAVE_SP_EEPROM_PAGE_SIZE; + } + + /* + * We should never read more that 'residue' bytes + */ + chunk = min(chunk, residue); + ret = rave_sp_eeprom_page_access(eeprom, type, offset, + data, chunk); + if (ret) + goto out; + + residue -= chunk; + offset += chunk; + data += chunk; + } while (residue); +out: + mutex_unlock(&eeprom->mutex); + return ret; +} + +static int rave_sp_eeprom_reg_read(void *eeprom, unsigned int offset, + void *val, size_t bytes) +{ + return rave_sp_eeprom_access(eeprom, RAVE_SP_EEPROM_READ, + offset, val, bytes); +} + +static int rave_sp_eeprom_reg_write(void *eeprom, unsigned int offset, + void *val, size_t bytes) +{ + return rave_sp_eeprom_access(eeprom, RAVE_SP_EEPROM_WRITE, + offset, val, bytes); +} + +static int rave_sp_eeprom_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rave_sp *sp = dev_get_drvdata(dev->parent); + struct device_node *np = dev->of_node; + struct nvmem_config config = { 0 }; + struct rave_sp_eeprom *eeprom; + struct nvmem_device *nvmem; + u32 reg[2], size; + + if (of_property_read_u32_array(np, "reg", reg, ARRAY_SIZE(reg))) { + dev_err(dev, "Failed to parse \"reg\" property\n"); + return -EINVAL; + } + + size = reg[1]; + /* + * Per ICD, we have no more than 2 bytes to specify EEPROM + * page. + */ + if (size > U16_MAX * RAVE_SP_EEPROM_PAGE_SIZE) { + dev_err(dev, "Specified size is too big\n"); + return -EINVAL; + } + + eeprom = devm_kzalloc(dev, sizeof(*eeprom), GFP_KERNEL); + if (!eeprom) + return -ENOMEM; + + eeprom->address = reg[0]; + eeprom->sp = sp; + eeprom->dev = dev; + + if (size > SZ_8K) + eeprom->header_size = RAVE_SP_EEPROM_HEADER_BIG; + else + eeprom->header_size = RAVE_SP_EEPROM_HEADER_SMALL; + + mutex_init(&eeprom->mutex); + + config.id = -1; + of_property_read_string(np, "zii,eeprom-name", &config.name); + config.priv = eeprom; + config.dev = dev; + config.size = size; + config.reg_read = rave_sp_eeprom_reg_read; + config.reg_write = rave_sp_eeprom_reg_write; + config.word_size = 1; + config.stride = 1; + + nvmem = devm_nvmem_register(dev, &config); + + return PTR_ERR_OR_ZERO(nvmem); +} + +static const struct of_device_id rave_sp_eeprom_of_match[] = { + { .compatible = "zii,rave-sp-eeprom" }, + {} +}; +MODULE_DEVICE_TABLE(of, rave_sp_eeprom_of_match); + +static struct platform_driver rave_sp_eeprom_driver = { + .probe = rave_sp_eeprom_probe, + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = rave_sp_eeprom_of_match, + }, +}; +module_platform_driver(rave_sp_eeprom_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Andrey Vostrikov <andrey.vostrikov@cogentembedded.com>"); +MODULE_AUTHOR("Nikita Yushchenko <nikita.yoush@cogentembedded.com>"); +MODULE_AUTHOR("Andrey Smirnov <andrew.smirnov@gmail.com>"); +MODULE_DESCRIPTION("RAVE SP EEPROM driver"); diff --git a/drivers/slimbus/qcom-ctrl.c b/drivers/slimbus/qcom-ctrl.c index ffb46f915334..bb36a8fbc9b1 100644 --- a/drivers/slimbus/qcom-ctrl.c +++ b/drivers/slimbus/qcom-ctrl.c @@ -439,13 +439,12 @@ static int slim_get_current_rxbuf(struct qcom_slim_ctrl *ctrl, void *buf) static void qcom_slim_rxwq(struct work_struct *work) { u8 buf[SLIM_MSGQ_BUF_LEN]; - u8 mc, mt, len; + u8 mc, mt; int ret; struct qcom_slim_ctrl *ctrl = container_of(work, struct qcom_slim_ctrl, wd); while ((slim_get_current_rxbuf(ctrl, buf)) != -ENODATA) { - len = SLIM_HEADER_GET_RL(buf[0]); mt = SLIM_HEADER_GET_MT(buf[0]); mc = SLIM_HEADER_GET_MC(buf[1]); if (mt == SLIM_MSG_MT_CORE && diff --git a/drivers/soundwire/Kconfig 
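
rave_sp_eeprom_access() above walks an arbitrary (offset, length) request in chunks that never cross a 32-byte EEPROM page: the first chunk only runs up to the next page boundary, every later chunk is a full page or whatever residue is left. The arithmetic in isolation, as a user-space sketch that only prints the chunks it would issue (the page size is the driver's, the offset/length values are arbitrary examples):

        #include <stdio.h>

        #define PAGE_SZ 32u     /* RAVE_SP_EEPROM_PAGE_SIZE */

        static unsigned int min_u(unsigned int a, unsigned int b)
        {
                return a < b ? a : b;
        }

        static void walk(unsigned int offset, unsigned int len)
        {
                unsigned int head = offset % PAGE_SZ;   /* misalignment of the first chunk */
                unsigned int residue = len;
                unsigned int chunk;

                while (residue) {
                        /* A misaligned first chunk only reaches the next page boundary */
                        chunk = head ? PAGE_SZ - head : PAGE_SZ;
                        head = 0;
                        chunk = min_u(chunk, residue);

                        printf("page %u, in-page offset %u, %u bytes\n",
                               offset / PAGE_SZ, offset % PAGE_SZ, chunk);

                        residue -= chunk;
                        offset += chunk;
                }
        }

        int main(void)
        {
                walk(20, 50);   /* example: 12 bytes, then 32, then 6 */
                return 0;
        }
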
b/drivers/soundwire/Kconfig index b46084b4b1f8..19c8efb9a5ee 100644 --- a/drivers/soundwire/Kconfig +++ b/drivers/soundwire/Kconfig @@ -27,7 +27,7 @@ config SOUNDWIRE_INTEL tristate "Intel SoundWire Master driver" select SOUNDWIRE_CADENCE select SOUNDWIRE_BUS - depends on X86 && ACPI + depends on X86 && ACPI && SND_SOC ---help--- SoundWire Intel Master driver. If you have an Intel platform which has a SoundWire Master then diff --git a/drivers/soundwire/Makefile b/drivers/soundwire/Makefile index e1a74c5692aa..5817beaca0e1 100644 --- a/drivers/soundwire/Makefile +++ b/drivers/soundwire/Makefile @@ -3,7 +3,7 @@ # #Bus Objs -soundwire-bus-objs := bus_type.o bus.o slave.o mipi_disco.o +soundwire-bus-objs := bus_type.o bus.o slave.o mipi_disco.o stream.o obj-$(CONFIG_SOUNDWIRE_BUS) += soundwire-bus.o #Cadence Objs diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c index d6dc8e7a8614..dcc0ff9f0c22 100644 --- a/drivers/soundwire/bus.c +++ b/drivers/soundwire/bus.c @@ -17,6 +17,7 @@ */ int sdw_add_bus_master(struct sdw_bus *bus) { + struct sdw_master_prop *prop = NULL; int ret; if (!bus->dev) { @@ -32,6 +33,7 @@ int sdw_add_bus_master(struct sdw_bus *bus) mutex_init(&bus->msg_lock); mutex_init(&bus->bus_lock); INIT_LIST_HEAD(&bus->slaves); + INIT_LIST_HEAD(&bus->m_rt_list); if (bus->ops->read_prop) { ret = bus->ops->read_prop(bus); @@ -77,6 +79,21 @@ int sdw_add_bus_master(struct sdw_bus *bus) return ret; } + /* + * Initialize clock values based on Master properties. The max + * frequency is read from max_freq property. Current assumption + * is that the bus will start at highest clock frequency when + * powered on. + * + * Default active bank will be 0 as out of reset the Slaves have + * to start with bank 0 (Table 40 of Spec) + */ + prop = &bus->prop; + bus->params.max_dr_freq = prop->max_freq * SDW_DOUBLE_RATE_FACTOR; + bus->params.curr_dr_freq = bus->params.max_dr_freq; + bus->params.curr_bank = SDW_BANK0; + bus->params.next_bank = SDW_BANK1; + return 0; } EXPORT_SYMBOL(sdw_add_bus_master); @@ -576,6 +593,32 @@ static void sdw_modify_slave_status(struct sdw_slave *slave, mutex_unlock(&slave->bus->bus_lock); } +int sdw_configure_dpn_intr(struct sdw_slave *slave, + int port, bool enable, int mask) +{ + u32 addr; + int ret; + u8 val = 0; + + addr = SDW_DPN_INTMASK(port); + + /* Set/Clear port ready interrupt mask */ + if (enable) { + val |= mask; + val |= SDW_DPN_INT_PORT_READY; + } else { + val &= ~(mask); + val &= ~SDW_DPN_INT_PORT_READY; + } + + ret = sdw_update(slave, addr, (mask | SDW_DPN_INT_PORT_READY), val); + if (ret < 0) + dev_err(slave->bus->dev, + "SDW_DPN_INTMASK write failed:%d", val); + + return ret; +} + static int sdw_initialize_slave(struct sdw_slave *slave) { struct sdw_slave_prop *prop = &slave->prop; diff --git a/drivers/soundwire/bus.h b/drivers/soundwire/bus.h index 345c34d697e9..3b15c4e25a3a 100644 --- a/drivers/soundwire/bus.h +++ b/drivers/soundwire/bus.h @@ -45,6 +45,78 @@ struct sdw_msg { bool page; }; +#define SDW_DOUBLE_RATE_FACTOR 2 + +extern int rows[SDW_FRAME_ROWS]; +extern int cols[SDW_FRAME_COLS]; + +/** + * sdw_port_runtime: Runtime port parameters for Master or Slave + * + * @num: Port number. For audio streams, valid port number ranges from + * [1,14] + * @ch_mask: Channel mask + * @transport_params: Transport parameters + * @port_params: Port parameters + * @port_node: List node for Master or Slave port_list + * + * SoundWire spec has no mention of ports for Master interface but the + * concept is logically extended. 
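
sdw_configure_dpn_intr() above builds the new interrupt-mask bits and hands them to sdw_update() together with the affected mask, i.e. a read-modify-write confined to those bits. Assuming sdw_update() follows the usual read/merge/write pattern, the merge itself reduces to the sketch below; the register value and the PORT_READY bit position (bit 0) are assumptions made only for the example.

        #include <stdio.h>
        #include <stdint.h>

        /* Merge `val` into `old`, touching only the bits selected by `mask` */
        static uint8_t update_masked(uint8_t old, uint8_t mask, uint8_t val)
        {
                return (uint8_t)((old & ~mask) | (val & mask));
        }

        int main(void)
        {
                uint8_t reg = 0x81;     /* example current DPN_IntMask contents */

                /* enable: set the caller's bits plus the assumed PORT_READY bit */
                printf("0x%02x\n", update_masked(reg, 0x07, 0x07));     /* 0x87 */
                /* disable: clear the same bits */
                printf("0x%02x\n", update_masked(reg, 0x07, 0x00));     /* 0x80 */
                return 0;
        }
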
+ */ +struct sdw_port_runtime { + int num; + int ch_mask; + struct sdw_transport_params transport_params; + struct sdw_port_params port_params; + struct list_head port_node; +}; + +/** + * sdw_slave_runtime: Runtime Stream parameters for Slave + * + * @slave: Slave handle + * @direction: Data direction for Slave + * @ch_count: Number of channels handled by the Slave for + * this stream + * @m_rt_node: sdw_master_runtime list node + * @port_list: List of Slave Ports configured for this stream + */ +struct sdw_slave_runtime { + struct sdw_slave *slave; + enum sdw_data_direction direction; + unsigned int ch_count; + struct list_head m_rt_node; + struct list_head port_list; +}; + +/** + * sdw_master_runtime: Runtime stream parameters for Master + * + * @bus: Bus handle + * @stream: Stream runtime handle + * @direction: Data direction for Master + * @ch_count: Number of channels handled by the Master for + * this stream, can be zero. + * @slave_rt_list: Slave runtime list + * @port_list: List of Master Ports configured for this stream, can be zero. + * @bus_node: sdw_bus m_rt_list node + */ +struct sdw_master_runtime { + struct sdw_bus *bus; + struct sdw_stream_runtime *stream; + enum sdw_data_direction direction; + unsigned int ch_count; + struct list_head slave_rt_list; + struct list_head port_list; + struct list_head bus_node; +}; + +struct sdw_dpn_prop *sdw_get_slave_dpn_prop(struct sdw_slave *slave, + enum sdw_data_direction direction, + unsigned int port_num); +int sdw_configure_dpn_intr(struct sdw_slave *slave, int port, + bool enable, int mask); + int sdw_transfer(struct sdw_bus *bus, struct sdw_msg *msg); int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg, struct sdw_defer *defer); diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c index 3a9b1462039b..cb6a331f448a 100644 --- a/drivers/soundwire/cadence_master.c +++ b/drivers/soundwire/cadence_master.c @@ -13,6 +13,8 @@ #include <linux/mod_devicetable.h> #include <linux/soundwire/sdw_registers.h> #include <linux/soundwire/sdw.h> +#include <sound/pcm_params.h> +#include <sound/soc.h> #include "bus.h" #include "cadence_master.h" @@ -396,7 +398,7 @@ static int cdns_prep_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int *cmd) return 0; } -static enum sdw_command_response +enum sdw_command_response cdns_xfer_msg(struct sdw_bus *bus, struct sdw_msg *msg) { struct sdw_cdns *cdns = bus_to_cdns(bus); @@ -422,8 +424,9 @@ cdns_xfer_msg(struct sdw_bus *bus, struct sdw_msg *msg) exit: return ret; } +EXPORT_SYMBOL(cdns_xfer_msg); -static enum sdw_command_response +enum sdw_command_response cdns_xfer_msg_defer(struct sdw_bus *bus, struct sdw_msg *msg, struct sdw_defer *defer) { @@ -443,8 +446,9 @@ cdns_xfer_msg_defer(struct sdw_bus *bus, return _cdns_xfer_msg(cdns, msg, cmd, 0, msg->len, true); } +EXPORT_SYMBOL(cdns_xfer_msg_defer); -static enum sdw_command_response +enum sdw_command_response cdns_reset_page_addr(struct sdw_bus *bus, unsigned int dev_num) { struct sdw_cdns *cdns = bus_to_cdns(bus); @@ -456,6 +460,7 @@ cdns_reset_page_addr(struct sdw_bus *bus, unsigned int dev_num) return cdns_program_scp_addr(cdns, &msg); } +EXPORT_SYMBOL(cdns_reset_page_addr); /* * IRQ handling @@ -666,6 +671,120 @@ int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns) } EXPORT_SYMBOL(sdw_cdns_enable_interrupt); +static int cdns_allocate_pdi(struct sdw_cdns *cdns, + struct sdw_cdns_pdi **stream, + u32 num, u32 pdi_offset) +{ + struct sdw_cdns_pdi *pdi; + int i; + + if (!num) + return 0; + + pdi = devm_kcalloc(cdns->dev, 
num, sizeof(*pdi), GFP_KERNEL); + if (!pdi) + return -ENOMEM; + + for (i = 0; i < num; i++) { + pdi[i].num = i + pdi_offset; + pdi[i].assigned = false; + } + + *stream = pdi; + return 0; +} + +/** + * sdw_cdns_pdi_init() - PDI initialization routine + * + * @cdns: Cadence instance + * @config: Stream configurations + */ +int sdw_cdns_pdi_init(struct sdw_cdns *cdns, + struct sdw_cdns_stream_config config) +{ + struct sdw_cdns_streams *stream; + int offset, i, ret; + + cdns->pcm.num_bd = config.pcm_bd; + cdns->pcm.num_in = config.pcm_in; + cdns->pcm.num_out = config.pcm_out; + cdns->pdm.num_bd = config.pdm_bd; + cdns->pdm.num_in = config.pdm_in; + cdns->pdm.num_out = config.pdm_out; + + /* Allocate PDIs for PCMs */ + stream = &cdns->pcm; + + /* First two PDIs are reserved for bulk transfers */ + stream->num_bd -= CDNS_PCM_PDI_OFFSET; + offset = CDNS_PCM_PDI_OFFSET; + + ret = cdns_allocate_pdi(cdns, &stream->bd, + stream->num_bd, offset); + if (ret) + return ret; + + offset += stream->num_bd; + + ret = cdns_allocate_pdi(cdns, &stream->in, + stream->num_in, offset); + if (ret) + return ret; + + offset += stream->num_in; + + ret = cdns_allocate_pdi(cdns, &stream->out, + stream->num_out, offset); + if (ret) + return ret; + + /* Update total number of PCM PDIs */ + stream->num_pdi = stream->num_bd + stream->num_in + stream->num_out; + cdns->num_ports = stream->num_pdi; + + /* Allocate PDIs for PDMs */ + stream = &cdns->pdm; + offset = CDNS_PDM_PDI_OFFSET; + ret = cdns_allocate_pdi(cdns, &stream->bd, + stream->num_bd, offset); + if (ret) + return ret; + + offset += stream->num_bd; + + ret = cdns_allocate_pdi(cdns, &stream->in, + stream->num_in, offset); + if (ret) + return ret; + + offset += stream->num_in; + + ret = cdns_allocate_pdi(cdns, &stream->out, + stream->num_out, offset); + if (ret) + return ret; + + /* Update total number of PDM PDIs */ + stream->num_pdi = stream->num_bd + stream->num_in + stream->num_out; + cdns->num_ports += stream->num_pdi; + + cdns->ports = devm_kcalloc(cdns->dev, cdns->num_ports, + sizeof(*cdns->ports), GFP_KERNEL); + if (!cdns->ports) { + ret = -ENOMEM; + return ret; + } + + for (i = 0; i < cdns->num_ports; i++) { + cdns->ports[i].assigned = false; + cdns->ports[i].num = i + 1; /* Port 0 reserved for bulk */ + } + + return 0; +} +EXPORT_SYMBOL(sdw_cdns_pdi_init); + /** * sdw_cdns_init() - Cadence initialization * @cdns: Cadence instance @@ -727,13 +846,133 @@ int sdw_cdns_init(struct sdw_cdns *cdns) } EXPORT_SYMBOL(sdw_cdns_init); -struct sdw_master_ops sdw_cdns_master_ops = { - .read_prop = sdw_master_read_prop, - .xfer_msg = cdns_xfer_msg, - .xfer_msg_defer = cdns_xfer_msg_defer, - .reset_page_addr = cdns_reset_page_addr, +int cdns_bus_conf(struct sdw_bus *bus, struct sdw_bus_params *params) +{ + struct sdw_cdns *cdns = bus_to_cdns(bus); + int mcp_clkctrl_off, mcp_clkctrl; + int divider; + + if (!params->curr_dr_freq) { + dev_err(cdns->dev, "NULL curr_dr_freq"); + return -EINVAL; + } + + divider = (params->max_dr_freq / params->curr_dr_freq) - 1; + + if (params->next_bank) + mcp_clkctrl_off = CDNS_MCP_CLK_CTRL1; + else + mcp_clkctrl_off = CDNS_MCP_CLK_CTRL0; + + mcp_clkctrl = cdns_readl(cdns, mcp_clkctrl_off); + mcp_clkctrl |= divider; + cdns_writel(cdns, mcp_clkctrl_off, mcp_clkctrl); + + return 0; +} +EXPORT_SYMBOL(cdns_bus_conf); + +static int cdns_port_params(struct sdw_bus *bus, + struct sdw_port_params *p_params, unsigned int bank) +{ + struct sdw_cdns *cdns = bus_to_cdns(bus); + int dpn_config = 0, dpn_config_off; + + if (bank) + dpn_config_off = 
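
cdns_bus_conf() above derives the clock divider from the ratio of the maximum double-rate frequency to the currently requested one, rejecting a zero request. A small worked example of that arithmetic; the 24 MHz figure is an arbitrary assumption for illustration, not a value taken from the driver:

        #include <stdio.h>

        static int bus_clk_divider(unsigned int max_dr_freq, unsigned int curr_dr_freq)
        {
                if (!curr_dr_freq)
                        return -1;      /* the driver rejects a zero frequency */
                return (int)(max_dr_freq / curr_dr_freq) - 1;
        }

        int main(void)
        {
                printf("%d\n", bus_clk_divider(24000000, 24000000));    /* 0: full rate */
                printf("%d\n", bus_clk_divider(24000000, 12000000));    /* 1: halve the clock */
                printf("%d\n", bus_clk_divider(24000000, 0));           /* -1: invalid request */
                return 0;
        }
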
CDNS_DPN_B1_CONFIG(p_params->num); + else + dpn_config_off = CDNS_DPN_B0_CONFIG(p_params->num); + + dpn_config = cdns_readl(cdns, dpn_config_off); + + dpn_config |= ((p_params->bps - 1) << + SDW_REG_SHIFT(CDNS_DPN_CONFIG_WL)); + dpn_config |= (p_params->flow_mode << + SDW_REG_SHIFT(CDNS_DPN_CONFIG_PORT_FLOW)); + dpn_config |= (p_params->data_mode << + SDW_REG_SHIFT(CDNS_DPN_CONFIG_PORT_DAT)); + + cdns_writel(cdns, dpn_config_off, dpn_config); + + return 0; +} + +static int cdns_transport_params(struct sdw_bus *bus, + struct sdw_transport_params *t_params, + enum sdw_reg_bank bank) +{ + struct sdw_cdns *cdns = bus_to_cdns(bus); + int dpn_offsetctrl = 0, dpn_offsetctrl_off; + int dpn_config = 0, dpn_config_off; + int dpn_hctrl = 0, dpn_hctrl_off; + int num = t_params->port_num; + int dpn_samplectrl_off; + + /* + * Note: Only full data port is supported on the Master side for + * both PCM and PDM ports. + */ + + if (bank) { + dpn_config_off = CDNS_DPN_B1_CONFIG(num); + dpn_samplectrl_off = CDNS_DPN_B1_SAMPLE_CTRL(num); + dpn_hctrl_off = CDNS_DPN_B1_HCTRL(num); + dpn_offsetctrl_off = CDNS_DPN_B1_OFFSET_CTRL(num); + } else { + dpn_config_off = CDNS_DPN_B0_CONFIG(num); + dpn_samplectrl_off = CDNS_DPN_B0_SAMPLE_CTRL(num); + dpn_hctrl_off = CDNS_DPN_B0_HCTRL(num); + dpn_offsetctrl_off = CDNS_DPN_B0_OFFSET_CTRL(num); + } + + dpn_config = cdns_readl(cdns, dpn_config_off); + + dpn_config |= (t_params->blk_grp_ctrl << + SDW_REG_SHIFT(CDNS_DPN_CONFIG_BGC)); + dpn_config |= (t_params->blk_pkg_mode << + SDW_REG_SHIFT(CDNS_DPN_CONFIG_BPM)); + cdns_writel(cdns, dpn_config_off, dpn_config); + + dpn_offsetctrl |= (t_params->offset1 << + SDW_REG_SHIFT(CDNS_DPN_OFFSET_CTRL_1)); + dpn_offsetctrl |= (t_params->offset2 << + SDW_REG_SHIFT(CDNS_DPN_OFFSET_CTRL_2)); + cdns_writel(cdns, dpn_offsetctrl_off, dpn_offsetctrl); + + dpn_hctrl |= (t_params->hstart << + SDW_REG_SHIFT(CDNS_DPN_HCTRL_HSTART)); + dpn_hctrl |= (t_params->hstop << SDW_REG_SHIFT(CDNS_DPN_HCTRL_HSTOP)); + dpn_hctrl |= (t_params->lane_ctrl << + SDW_REG_SHIFT(CDNS_DPN_HCTRL_LCTRL)); + + cdns_writel(cdns, dpn_hctrl_off, dpn_hctrl); + cdns_writel(cdns, dpn_samplectrl_off, (t_params->sample_interval - 1)); + + return 0; +} + +static int cdns_port_enable(struct sdw_bus *bus, + struct sdw_enable_ch *enable_ch, unsigned int bank) +{ + struct sdw_cdns *cdns = bus_to_cdns(bus); + int dpn_chnen_off, ch_mask; + + if (bank) + dpn_chnen_off = CDNS_DPN_B1_CH_EN(enable_ch->port_num); + else + dpn_chnen_off = CDNS_DPN_B0_CH_EN(enable_ch->port_num); + + ch_mask = enable_ch->ch_mask * enable_ch->enable; + cdns_writel(cdns, dpn_chnen_off, ch_mask); + + return 0; +} + +static const struct sdw_master_port_ops cdns_port_ops = { + .dpn_set_port_params = cdns_port_params, + .dpn_set_port_transport_params = cdns_transport_params, + .dpn_port_enable_ch = cdns_port_enable, }; -EXPORT_SYMBOL(sdw_cdns_master_ops); /** * sdw_cdns_probe() - Cadence probe routine @@ -742,10 +981,204 @@ EXPORT_SYMBOL(sdw_cdns_master_ops); int sdw_cdns_probe(struct sdw_cdns *cdns) { init_completion(&cdns->tx_complete); + cdns->bus.port_ops = &cdns_port_ops; return 0; } EXPORT_SYMBOL(sdw_cdns_probe); +int cdns_set_sdw_stream(struct snd_soc_dai *dai, + void *stream, bool pcm, int direction) +{ + struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai); + struct sdw_cdns_dma_data *dma; + + dma = kzalloc(sizeof(*dma), GFP_KERNEL); + if (!dma) + return -ENOMEM; + + if (pcm) + dma->stream_type = SDW_STREAM_PCM; + else + dma->stream_type = SDW_STREAM_PDM; + + dma->bus = &cdns->bus; + dma->link_id = 
cdns->instance; + + dma->stream = stream; + + if (direction == SNDRV_PCM_STREAM_PLAYBACK) + dai->playback_dma_data = dma; + else + dai->capture_dma_data = dma; + + return 0; +} +EXPORT_SYMBOL(cdns_set_sdw_stream); + +/** + * cdns_find_pdi() - Find a free PDI + * + * @cdns: Cadence instance + * @num: Number of PDIs + * @pdi: PDI instances + * + * Find and return a free PDI for a given PDI array + */ +static struct sdw_cdns_pdi *cdns_find_pdi(struct sdw_cdns *cdns, + unsigned int num, struct sdw_cdns_pdi *pdi) +{ + int i; + + for (i = 0; i < num; i++) { + if (pdi[i].assigned == true) + continue; + pdi[i].assigned = true; + return &pdi[i]; + } + + return NULL; +} + +/** + * sdw_cdns_config_stream: Configure a stream + * + * @cdns: Cadence instance + * @port: Cadence data port + * @ch: Channel count + * @dir: Data direction + * @pdi: PDI to be used + */ +void sdw_cdns_config_stream(struct sdw_cdns *cdns, + struct sdw_cdns_port *port, + u32 ch, u32 dir, struct sdw_cdns_pdi *pdi) +{ + u32 offset, val = 0; + + if (dir == SDW_DATA_DIR_RX) + val = CDNS_PORTCTRL_DIRN; + + offset = CDNS_PORTCTRL + port->num * CDNS_PORT_OFFSET; + cdns_updatel(cdns, offset, CDNS_PORTCTRL_DIRN, val); + + val = port->num; + val |= ((1 << ch) - 1) << SDW_REG_SHIFT(CDNS_PDI_CONFIG_CHANNEL); + cdns_writel(cdns, CDNS_PDI_CONFIG(pdi->num), val); +} +EXPORT_SYMBOL(sdw_cdns_config_stream); + +/** + * cdns_get_num_pdi() - Get number of PDIs required + * + * @cdns: Cadence instance + * @pdi: PDI to be used + * @num: Number of PDIs + * @ch_count: Channel count + */ +static int cdns_get_num_pdi(struct sdw_cdns *cdns, + struct sdw_cdns_pdi *pdi, + unsigned int num, u32 ch_count) +{ + int i, pdis = 0; + + for (i = 0; i < num; i++) { + if (pdi[i].assigned == true) + continue; + + if (pdi[i].ch_count < ch_count) + ch_count -= pdi[i].ch_count; + else + ch_count = 0; + + pdis++; + + if (!ch_count) + break; + } + + if (ch_count) + return 0; + + return pdis; +} + +/** + * sdw_cdns_get_stream() - Get stream information + * + * @cdns: Cadence instance + * @stream: Stream to be allocated + * @ch: Channel count + * @dir: Data direction + */ +int sdw_cdns_get_stream(struct sdw_cdns *cdns, + struct sdw_cdns_streams *stream, + u32 ch, u32 dir) +{ + int pdis = 0; + + if (dir == SDW_DATA_DIR_RX) + pdis = cdns_get_num_pdi(cdns, stream->in, stream->num_in, ch); + else + pdis = cdns_get_num_pdi(cdns, stream->out, stream->num_out, ch); + + /* check if we found PDI, else find in bi-directional */ + if (!pdis) + pdis = cdns_get_num_pdi(cdns, stream->bd, stream->num_bd, ch); + + return pdis; +} +EXPORT_SYMBOL(sdw_cdns_get_stream); + +/** + * sdw_cdns_alloc_stream() - Allocate a stream + * + * @cdns: Cadence instance + * @stream: Stream to be allocated + * @port: Cadence data port + * @ch: Channel count + * @dir: Data direction + */ +int sdw_cdns_alloc_stream(struct sdw_cdns *cdns, + struct sdw_cdns_streams *stream, + struct sdw_cdns_port *port, u32 ch, u32 dir) +{ + struct sdw_cdns_pdi *pdi = NULL; + + if (dir == SDW_DATA_DIR_RX) + pdi = cdns_find_pdi(cdns, stream->num_in, stream->in); + else + pdi = cdns_find_pdi(cdns, stream->num_out, stream->out); + + /* check if we found a PDI, else find in bi-directional */ + if (!pdi) + pdi = cdns_find_pdi(cdns, stream->num_bd, stream->bd); + + if (!pdi) + return -EIO; + + port->pdi = pdi; + pdi->l_ch_num = 0; + pdi->h_ch_num = ch - 1; + pdi->dir = dir; + pdi->ch_count = ch; + + return 0; +} +EXPORT_SYMBOL(sdw_cdns_alloc_stream); + +void sdw_cdns_shutdown(struct snd_pcm_substream *substream, + struct snd_soc_dai 
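
sdw_cdns_config_stream() above encodes the active channels as a contiguous bit mask, ((1 << ch) - 1), before shifting it into the channel field of the PDI config register; the same mask shape reappears later as the port ch_mask in intel_hw_params(). The mask construction on its own:

        #include <stdio.h>

        /* Contiguous mask of the `ch` lowest channels */
        static unsigned int channel_mask(unsigned int ch)
        {
                return (1u << ch) - 1;
        }

        int main(void)
        {
                printf("0x%x\n", channel_mask(1));      /* 0x1: one channel */
                printf("0x%x\n", channel_mask(2));      /* 0x3: two channels */
                printf("0x%x\n", channel_mask(8));      /* 0xff: eight channels */
                return 0;
        }
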
*dai) +{ + struct sdw_cdns_dma_data *dma; + + dma = snd_soc_dai_get_dma_data(dai, substream); + if (!dma) + return; + + snd_soc_dai_set_dma_data(dai, substream, NULL); + kfree(dma); +} +EXPORT_SYMBOL(sdw_cdns_shutdown); + MODULE_LICENSE("Dual BSD/GPL"); MODULE_DESCRIPTION("Cadence Soundwire Library"); diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h index beaf6c9804eb..eb902b19c5a4 100644 --- a/drivers/soundwire/cadence_master.h +++ b/drivers/soundwire/cadence_master.h @@ -1,10 +1,117 @@ // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) // Copyright(c) 2015-17 Intel Corporation. +#include <sound/soc.h> #ifndef __SDW_CADENCE_H #define __SDW_CADENCE_H /** + * struct sdw_cdns_pdi: PDI (Physical Data Interface) instance + * + * @assigned: pdi assigned + * @num: pdi number + * @intel_alh_id: link identifier + * @l_ch_num: low channel for PDI + * @h_ch_num: high channel for PDI + * @ch_count: total channel count for PDI + * @dir: data direction + * @type: stream type, PDM or PCM + */ +struct sdw_cdns_pdi { + bool assigned; + int num; + int intel_alh_id; + int l_ch_num; + int h_ch_num; + int ch_count; + enum sdw_data_direction dir; + enum sdw_stream_type type; +}; + +/** + * struct sdw_cdns_port: Cadence port structure + * + * @num: port number + * @assigned: port assigned + * @ch: channel count + * @direction: data port direction + * @pdi: pdi for this port + */ +struct sdw_cdns_port { + unsigned int num; + bool assigned; + unsigned int ch; + enum sdw_data_direction direction; + struct sdw_cdns_pdi *pdi; +}; + +/** + * struct sdw_cdns_streams: Cadence stream data structure + * + * @num_bd: number of bidirectional streams + * @num_in: number of input streams + * @num_out: number of output streams + * @num_ch_bd: number of bidirectional stream channels + * @num_ch_bd: number of input stream channels + * @num_ch_bd: number of output stream channels + * @num_pdi: total number of PDIs + * @bd: bidirectional streams + * @in: input streams + * @out: output streams + */ +struct sdw_cdns_streams { + unsigned int num_bd; + unsigned int num_in; + unsigned int num_out; + unsigned int num_ch_bd; + unsigned int num_ch_in; + unsigned int num_ch_out; + unsigned int num_pdi; + struct sdw_cdns_pdi *bd; + struct sdw_cdns_pdi *in; + struct sdw_cdns_pdi *out; +}; + +/** + * struct sdw_cdns_stream_config: stream configuration + * + * @pcm_bd: number of bidirectional PCM streams supported + * @pcm_in: number of input PCM streams supported + * @pcm_out: number of output PCM streams supported + * @pdm_bd: number of bidirectional PDM streams supported + * @pdm_in: number of input PDM streams supported + * @pdm_out: number of output PDM streams supported + */ +struct sdw_cdns_stream_config { + unsigned int pcm_bd; + unsigned int pcm_in; + unsigned int pcm_out; + unsigned int pdm_bd; + unsigned int pdm_in; + unsigned int pdm_out; +}; + +/** + * struct sdw_cdns_dma_data: Cadence DMA data + * + * @name: SoundWire stream name + * @nr_ports: Number of ports + * @port: Ports + * @bus: Bus handle + * @stream_type: Stream type + * @link_id: Master link id + */ +struct sdw_cdns_dma_data { + char *name; + struct sdw_stream_runtime *stream; + int nr_ports; + struct sdw_cdns_port **port; + struct sdw_bus *bus; + enum sdw_stream_type stream_type; + int link_id; +}; + +/** * struct sdw_cdns - Cadence driver context * @dev: Linux device * @bus: Bus handle @@ -12,6 +119,10 @@ * @response_buf: SoundWire response buffer * @tx_complete: Tx completion * @defer: Defer pointer + * @ports: Data ports + * 
@num_ports: Total number of data ports + * @pcm: PCM streams + * @pdm: PDM streams * @registers: Cadence registers * @link_up: Link status * @msg_count: Messages sent on bus @@ -25,6 +136,12 @@ struct sdw_cdns { struct completion tx_complete; struct sdw_defer *defer; + struct sdw_cdns_port *ports; + int num_ports; + + struct sdw_cdns_streams pcm; + struct sdw_cdns_streams pdm; + void __iomem *registers; bool link_up; @@ -42,7 +159,41 @@ irqreturn_t sdw_cdns_irq(int irq, void *dev_id); irqreturn_t sdw_cdns_thread(int irq, void *dev_id); int sdw_cdns_init(struct sdw_cdns *cdns); +int sdw_cdns_pdi_init(struct sdw_cdns *cdns, + struct sdw_cdns_stream_config config); int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns); +int sdw_cdns_get_stream(struct sdw_cdns *cdns, + struct sdw_cdns_streams *stream, + u32 ch, u32 dir); +int sdw_cdns_alloc_stream(struct sdw_cdns *cdns, + struct sdw_cdns_streams *stream, + struct sdw_cdns_port *port, u32 ch, u32 dir); +void sdw_cdns_config_stream(struct sdw_cdns *cdns, struct sdw_cdns_port *port, + u32 ch, u32 dir, struct sdw_cdns_pdi *pdi); + +void sdw_cdns_shutdown(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai); +int sdw_cdns_pcm_set_stream(struct snd_soc_dai *dai, + void *stream, int direction); +int sdw_cdns_pdm_set_stream(struct snd_soc_dai *dai, + void *stream, int direction); + +enum sdw_command_response +cdns_reset_page_addr(struct sdw_bus *bus, unsigned int dev_num); + +enum sdw_command_response +cdns_xfer_msg(struct sdw_bus *bus, struct sdw_msg *msg); + +enum sdw_command_response +cdns_xfer_msg_defer(struct sdw_bus *bus, + struct sdw_msg *msg, struct sdw_defer *defer); + +enum sdw_command_response +cdns_reset_page_addr(struct sdw_bus *bus, unsigned int dev_num); + +int cdns_bus_conf(struct sdw_bus *bus, struct sdw_bus_params *params); +int cdns_set_sdw_stream(struct snd_soc_dai *dai, + void *stream, bool pcm, int direction); #endif /* __SDW_CADENCE_H */ diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c index 86a7bd1fc912..0a8990e758f9 100644 --- a/drivers/soundwire/intel.c +++ b/drivers/soundwire/intel.c @@ -9,6 +9,8 @@ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/platform_device.h> +#include <sound/pcm_params.h> +#include <sound/soc.h> #include <linux/soundwire/sdw_registers.h> #include <linux/soundwire/sdw.h> #include <linux/soundwire/sdw_intel.h> @@ -85,6 +87,12 @@ #define SDW_ALH_STRMZCFG_DMAT GENMASK(7, 0) #define SDW_ALH_STRMZCFG_CHN GENMASK(19, 16) +enum intel_pdi_type { + INTEL_PDI_IN = 0, + INTEL_PDI_OUT = 1, + INTEL_PDI_BD = 2, +}; + struct sdw_intel { struct sdw_cdns cdns; int instance; @@ -234,6 +242,490 @@ static int intel_shim_init(struct sdw_intel *sdw) return ret; } +/* + * PDI routines + */ +static void intel_pdi_init(struct sdw_intel *sdw, + struct sdw_cdns_stream_config *config) +{ + void __iomem *shim = sdw->res->shim; + unsigned int link_id = sdw->instance; + int pcm_cap, pdm_cap; + + /* PCM Stream Capability */ + pcm_cap = intel_readw(shim, SDW_SHIM_PCMSCAP(link_id)); + + config->pcm_bd = (pcm_cap & SDW_SHIM_PCMSCAP_BSS) >> + SDW_REG_SHIFT(SDW_SHIM_PCMSCAP_BSS); + config->pcm_in = (pcm_cap & SDW_SHIM_PCMSCAP_ISS) >> + SDW_REG_SHIFT(SDW_SHIM_PCMSCAP_ISS); + config->pcm_out = (pcm_cap & SDW_SHIM_PCMSCAP_OSS) >> + SDW_REG_SHIFT(SDW_SHIM_PCMSCAP_OSS); + + /* PDM Stream Capability */ + pdm_cap = intel_readw(shim, SDW_SHIM_PDMSCAP(link_id)); + + config->pdm_bd = (pdm_cap & SDW_SHIM_PDMSCAP_BSS) >> + SDW_REG_SHIFT(SDW_SHIM_PDMSCAP_BSS); + config->pdm_in = (pdm_cap & 
SDW_SHIM_PDMSCAP_ISS) >> + SDW_REG_SHIFT(SDW_SHIM_PDMSCAP_ISS); + config->pdm_out = (pdm_cap & SDW_SHIM_PDMSCAP_OSS) >> + SDW_REG_SHIFT(SDW_SHIM_PDMSCAP_OSS); +} + +static int +intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num, bool pcm) +{ + void __iomem *shim = sdw->res->shim; + unsigned int link_id = sdw->instance; + int count; + + if (pcm) { + count = intel_readw(shim, SDW_SHIM_PCMSYCHC(link_id, pdi_num)); + } else { + count = intel_readw(shim, SDW_SHIM_PDMSCAP(link_id)); + count = ((count & SDW_SHIM_PDMSCAP_CPSS) >> + SDW_REG_SHIFT(SDW_SHIM_PDMSCAP_CPSS)); + } + + /* zero based values for channel count in register */ + count++; + + return count; +} + +static int intel_pdi_get_ch_update(struct sdw_intel *sdw, + struct sdw_cdns_pdi *pdi, + unsigned int num_pdi, + unsigned int *num_ch, bool pcm) +{ + int i, ch_count = 0; + + for (i = 0; i < num_pdi; i++) { + pdi->ch_count = intel_pdi_get_ch_cap(sdw, pdi->num, pcm); + ch_count += pdi->ch_count; + pdi++; + } + + *num_ch = ch_count; + return 0; +} + +static int intel_pdi_stream_ch_update(struct sdw_intel *sdw, + struct sdw_cdns_streams *stream, bool pcm) +{ + intel_pdi_get_ch_update(sdw, stream->bd, stream->num_bd, + &stream->num_ch_bd, pcm); + + intel_pdi_get_ch_update(sdw, stream->in, stream->num_in, + &stream->num_ch_in, pcm); + + intel_pdi_get_ch_update(sdw, stream->out, stream->num_out, + &stream->num_ch_out, pcm); + + return 0; +} + +static int intel_pdi_ch_update(struct sdw_intel *sdw) +{ + /* First update PCM streams followed by PDM streams */ + intel_pdi_stream_ch_update(sdw, &sdw->cdns.pcm, true); + intel_pdi_stream_ch_update(sdw, &sdw->cdns.pdm, false); + + return 0; +} + +static void +intel_pdi_shim_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi) +{ + void __iomem *shim = sdw->res->shim; + unsigned int link_id = sdw->instance; + int pdi_conf = 0; + + pdi->intel_alh_id = (link_id * 16) + pdi->num + 5; + + /* + * Program stream parameters to stream SHIM register + * This is applicable for PCM stream only. 
+ */ + if (pdi->type != SDW_STREAM_PCM) + return; + + if (pdi->dir == SDW_DATA_DIR_RX) + pdi_conf |= SDW_SHIM_PCMSYCM_DIR; + else + pdi_conf &= ~(SDW_SHIM_PCMSYCM_DIR); + + pdi_conf |= (pdi->intel_alh_id << + SDW_REG_SHIFT(SDW_SHIM_PCMSYCM_STREAM)); + pdi_conf |= (pdi->l_ch_num << SDW_REG_SHIFT(SDW_SHIM_PCMSYCM_LCHN)); + pdi_conf |= (pdi->h_ch_num << SDW_REG_SHIFT(SDW_SHIM_PCMSYCM_HCHN)); + + intel_writew(shim, SDW_SHIM_PCMSYCHM(link_id, pdi->num), pdi_conf); +} + +static void +intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi) +{ + void __iomem *alh = sdw->res->alh; + unsigned int link_id = sdw->instance; + unsigned int conf; + + pdi->intel_alh_id = (link_id * 16) + pdi->num + 5; + + /* Program Stream config ALH register */ + conf = intel_readl(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id)); + + conf |= (SDW_ALH_STRMZCFG_DMAT_VAL << + SDW_REG_SHIFT(SDW_ALH_STRMZCFG_DMAT)); + + conf |= ((pdi->ch_count - 1) << + SDW_REG_SHIFT(SDW_ALH_STRMZCFG_CHN)); + + intel_writel(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id), conf); +} + +static int intel_config_stream(struct sdw_intel *sdw, + struct snd_pcm_substream *substream, + struct snd_soc_dai *dai, + struct snd_pcm_hw_params *hw_params, int link_id) +{ + if (sdw->res->ops && sdw->res->ops->config_stream) + return sdw->res->ops->config_stream(sdw->res->arg, + substream, dai, hw_params, link_id); + + return -EIO; +} + +/* + * DAI routines + */ + +static struct sdw_cdns_port *intel_alloc_port(struct sdw_intel *sdw, + u32 ch, u32 dir, bool pcm) +{ + struct sdw_cdns *cdns = &sdw->cdns; + struct sdw_cdns_port *port = NULL; + int i, ret = 0; + + for (i = 0; i < cdns->num_ports; i++) { + if (cdns->ports[i].assigned == true) + continue; + + port = &cdns->ports[i]; + port->assigned = true; + port->direction = dir; + port->ch = ch; + break; + } + + if (!port) { + dev_err(cdns->dev, "Unable to find a free port\n"); + return NULL; + } + + if (pcm) { + ret = sdw_cdns_alloc_stream(cdns, &cdns->pcm, port, ch, dir); + if (ret) + goto out; + + intel_pdi_shim_configure(sdw, port->pdi); + sdw_cdns_config_stream(cdns, port, ch, dir, port->pdi); + + intel_pdi_alh_configure(sdw, port->pdi); + + } else { + ret = sdw_cdns_alloc_stream(cdns, &cdns->pdm, port, ch, dir); + } + +out: + if (ret) { + port->assigned = false; + port = NULL; + } + + return port; +} + +static void intel_port_cleanup(struct sdw_cdns_dma_data *dma) +{ + int i; + + for (i = 0; i < dma->nr_ports; i++) { + if (dma->port[i]) { + dma->port[i]->pdi->assigned = false; + dma->port[i]->pdi = NULL; + dma->port[i]->assigned = false; + dma->port[i] = NULL; + } + } +} + +static int intel_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct snd_soc_dai *dai) +{ + struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai); + struct sdw_intel *sdw = cdns_to_intel(cdns); + struct sdw_cdns_dma_data *dma; + struct sdw_stream_config sconfig; + struct sdw_port_config *pconfig; + int ret, i, ch, dir; + bool pcm = true; + + dma = snd_soc_dai_get_dma_data(dai, substream); + if (!dma) + return -EIO; + + ch = params_channels(params); + if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) + dir = SDW_DATA_DIR_RX; + else + dir = SDW_DATA_DIR_TX; + + if (dma->stream_type == SDW_STREAM_PDM) { + /* TODO: Check whether PDM decimator is already in use */ + dma->nr_ports = sdw_cdns_get_stream(cdns, &cdns->pdm, ch, dir); + pcm = false; + } else { + dma->nr_ports = sdw_cdns_get_stream(cdns, &cdns->pcm, ch, dir); + } + + if (!dma->nr_ports) { + dev_err(dai->dev, "ports/resources not 
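
Both intel_pdi_shim_configure() and intel_pdi_alh_configure() above place a PDI into the ALH stream space as (link_id * 16) + pdi->num + 5. A couple of worked values; the link and PDI numbers are arbitrary examples:

        #include <stdio.h>

        /* ALH stream id used by the SHIM/ALH programming above */
        static int intel_alh_id(int link_id, int pdi_num)
        {
                return (link_id * 16) + pdi_num + 5;
        }

        int main(void)
        {
                printf("%d\n", intel_alh_id(0, 2));     /* 7: link 0, PDI 2 */
                printf("%d\n", intel_alh_id(1, 2));     /* 23: link 1, PDI 2 */
                return 0;
        }
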
available"); + return -EINVAL; + } + + dma->port = kcalloc(dma->nr_ports, sizeof(*dma->port), GFP_KERNEL); + if (!dma->port) + return -ENOMEM; + + for (i = 0; i < dma->nr_ports; i++) { + dma->port[i] = intel_alloc_port(sdw, ch, dir, pcm); + if (!dma->port[i]) { + ret = -EINVAL; + goto port_error; + } + } + + /* Inform DSP about PDI stream number */ + for (i = 0; i < dma->nr_ports; i++) { + ret = intel_config_stream(sdw, substream, dai, params, + dma->port[i]->pdi->intel_alh_id); + if (ret) + goto port_error; + } + + sconfig.direction = dir; + sconfig.ch_count = ch; + sconfig.frame_rate = params_rate(params); + sconfig.type = dma->stream_type; + + if (dma->stream_type == SDW_STREAM_PDM) { + sconfig.frame_rate *= 50; + sconfig.bps = 1; + } else { + sconfig.bps = snd_pcm_format_width(params_format(params)); + } + + /* Port configuration */ + pconfig = kcalloc(dma->nr_ports, sizeof(*pconfig), GFP_KERNEL); + if (!pconfig) { + ret = -ENOMEM; + goto port_error; + } + + for (i = 0; i < dma->nr_ports; i++) { + pconfig[i].num = dma->port[i]->num; + pconfig[i].ch_mask = (1 << ch) - 1; + } + + ret = sdw_stream_add_master(&cdns->bus, &sconfig, + pconfig, dma->nr_ports, dma->stream); + if (ret) { + dev_err(cdns->dev, "add master to stream failed:%d", ret); + goto stream_error; + } + + kfree(pconfig); + return ret; + +stream_error: + kfree(pconfig); +port_error: + intel_port_cleanup(dma); + kfree(dma->port); + return ret; +} + +static int +intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) +{ + struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai); + struct sdw_cdns_dma_data *dma; + int ret; + + dma = snd_soc_dai_get_dma_data(dai, substream); + if (!dma) + return -EIO; + + ret = sdw_stream_remove_master(&cdns->bus, dma->stream); + if (ret < 0) + dev_err(dai->dev, "remove master from stream %s failed: %d", + dma->stream->name, ret); + + intel_port_cleanup(dma); + kfree(dma->port); + return ret; +} + +static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai, + void *stream, int direction) +{ + return cdns_set_sdw_stream(dai, stream, true, direction); +} + +static int intel_pdm_set_sdw_stream(struct snd_soc_dai *dai, + void *stream, int direction) +{ + return cdns_set_sdw_stream(dai, stream, false, direction); +} + +static struct snd_soc_dai_ops intel_pcm_dai_ops = { + .hw_params = intel_hw_params, + .hw_free = intel_hw_free, + .shutdown = sdw_cdns_shutdown, + .set_sdw_stream = intel_pcm_set_sdw_stream, +}; + +static struct snd_soc_dai_ops intel_pdm_dai_ops = { + .hw_params = intel_hw_params, + .hw_free = intel_hw_free, + .shutdown = sdw_cdns_shutdown, + .set_sdw_stream = intel_pdm_set_sdw_stream, +}; + +static const struct snd_soc_component_driver dai_component = { + .name = "soundwire", +}; + +static int intel_create_dai(struct sdw_cdns *cdns, + struct snd_soc_dai_driver *dais, + enum intel_pdi_type type, + u32 num, u32 off, u32 max_ch, bool pcm) +{ + int i; + + if (num == 0) + return 0; + + /* TODO: Read supported rates/formats from hardware */ + for (i = off; i < (off + num); i++) { + dais[i].name = kasprintf(GFP_KERNEL, "SDW%d Pin%d", + cdns->instance, i); + if (!dais[i].name) + return -ENOMEM; + + if (type == INTEL_PDI_BD || type == INTEL_PDI_OUT) { + dais[i].playback.stream_name = kasprintf(GFP_KERNEL, + "SDW%d Tx%d", + cdns->instance, i); + if (!dais[i].playback.stream_name) { + kfree(dais[i].name); + return -ENOMEM; + } + + dais[i].playback.channels_min = 1; + dais[i].playback.channels_max = max_ch; + dais[i].playback.rates = SNDRV_PCM_RATE_48000; + 
dais[i].playback.formats = SNDRV_PCM_FMTBIT_S16_LE; + } + + if (type == INTEL_PDI_BD || type == INTEL_PDI_IN) { + dais[i].capture.stream_name = kasprintf(GFP_KERNEL, + "SDW%d Rx%d", + cdns->instance, i); + if (!dais[i].capture.stream_name) { + kfree(dais[i].name); + kfree(dais[i].playback.stream_name); + return -ENOMEM; + } + + dais[i].playback.channels_min = 1; + dais[i].playback.channels_max = max_ch; + dais[i].capture.rates = SNDRV_PCM_RATE_48000; + dais[i].capture.formats = SNDRV_PCM_FMTBIT_S16_LE; + } + + dais[i].id = SDW_DAI_ID_RANGE_START + i; + + if (pcm) + dais[i].ops = &intel_pcm_dai_ops; + else + dais[i].ops = &intel_pdm_dai_ops; + } + + return 0; +} + +static int intel_register_dai(struct sdw_intel *sdw) +{ + struct sdw_cdns *cdns = &sdw->cdns; + struct sdw_cdns_streams *stream; + struct snd_soc_dai_driver *dais; + int num_dai, ret, off = 0; + + /* DAIs are created based on total number of PDIs supported */ + num_dai = cdns->pcm.num_pdi + cdns->pdm.num_pdi; + + dais = devm_kcalloc(cdns->dev, num_dai, sizeof(*dais), GFP_KERNEL); + if (!dais) + return -ENOMEM; + + /* Create PCM DAIs */ + stream = &cdns->pcm; + + ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, + stream->num_in, off, stream->num_ch_in, true); + if (ret) + return ret; + + off += cdns->pcm.num_in; + ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, + cdns->pcm.num_out, off, stream->num_ch_out, true); + if (ret) + return ret; + + off += cdns->pcm.num_out; + ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, + cdns->pcm.num_bd, off, stream->num_ch_bd, true); + if (ret) + return ret; + + /* Create PDM DAIs */ + stream = &cdns->pdm; + off += cdns->pcm.num_bd; + ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, + cdns->pdm.num_in, off, stream->num_ch_in, false); + if (ret) + return ret; + + off += cdns->pdm.num_in; + ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, + cdns->pdm.num_out, off, stream->num_ch_out, false); + if (ret) + return ret; + + off += cdns->pdm.num_bd; + ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, + cdns->pdm.num_bd, off, stream->num_ch_bd, false); + if (ret) + return ret; + + return snd_soc_register_component(cdns->dev, &dai_component, + dais, num_dai); +} + static int intel_prop_read(struct sdw_bus *bus) { /* Initialize with default handler to read all DisCo properties */ @@ -252,11 +744,20 @@ static int intel_prop_read(struct sdw_bus *bus) return 0; } +static struct sdw_master_ops sdw_intel_ops = { + .read_prop = sdw_master_read_prop, + .xfer_msg = cdns_xfer_msg, + .xfer_msg_defer = cdns_xfer_msg_defer, + .reset_page_addr = cdns_reset_page_addr, + .set_bus_conf = cdns_bus_conf, +}; + /* * probe and init */ static int intel_probe(struct platform_device *pdev) { + struct sdw_cdns_stream_config config; struct sdw_intel *sdw; int ret; @@ -276,8 +777,11 @@ static int intel_probe(struct platform_device *pdev) sdw_cdns_probe(&sdw->cdns); /* Set property read ops */ - sdw_cdns_master_ops.read_prop = intel_prop_read; - sdw->cdns.bus.ops = &sdw_cdns_master_ops; + sdw_intel_ops.read_prop = intel_prop_read; + sdw->cdns.bus.ops = &sdw_intel_ops; + + sdw_intel_ops.read_prop = intel_prop_read; + sdw->cdns.bus.ops = &sdw_intel_ops; platform_set_drvdata(pdev, sdw); @@ -296,9 +800,15 @@ static int intel_probe(struct platform_device *pdev) goto err_init; ret = sdw_cdns_enable_interrupt(&sdw->cdns); + + /* Read the PDI config and initialize cadence PDI */ + intel_pdi_init(sdw, &config); + ret = sdw_cdns_pdi_init(&sdw->cdns, config); if (ret) goto err_init; + intel_pdi_ch_update(sdw); + /* Acquire IRQ */ ret = 
request_threaded_irq(sdw->res->irq, sdw_cdns_irq, sdw_cdns_thread, IRQF_SHARED, KBUILD_MODNAME, @@ -309,8 +819,18 @@ static int intel_probe(struct platform_device *pdev) goto err_init; } + /* Register DAIs */ + ret = intel_register_dai(sdw); + if (ret) { + dev_err(sdw->cdns.dev, "DAI registration failed: %d", ret); + snd_soc_unregister_component(sdw->cdns.dev); + goto err_dai; + } + return 0; +err_dai: + free_irq(sdw->res->irq, sdw); err_init: sdw_delete_bus_master(&sdw->cdns.bus); err_master_reg: @@ -324,6 +844,7 @@ static int intel_remove(struct platform_device *pdev) sdw = platform_get_drvdata(pdev); free_irq(sdw->res->irq, sdw); + snd_soc_unregister_component(sdw->cdns.dev); sdw_delete_bus_master(&sdw->cdns.bus); return 0; diff --git a/drivers/soundwire/intel.h b/drivers/soundwire/intel.h index ffa30d9535a2..c1a5bac6212e 100644 --- a/drivers/soundwire/intel.h +++ b/drivers/soundwire/intel.h @@ -10,6 +10,8 @@ * @shim: Audio shim pointer * @alh: ALH (Audio Link Hub) pointer * @irq: Interrupt line + * @ops: Shim callback ops + * @arg: Shim callback ops argument * * This is set as pdata for each link instance. */ @@ -18,6 +20,8 @@ struct sdw_intel_link_res { void __iomem *shim; void __iomem *alh; int irq; + const struct sdw_intel_ops *ops; + void *arg; }; #endif /* __SDW_INTEL_LOCAL_H */ diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c index 6f2bb99526f2..d1ea6b4d0ad3 100644 --- a/drivers/soundwire/intel_init.c +++ b/drivers/soundwire/intel_init.c @@ -111,6 +111,9 @@ static struct sdw_intel_ctx link->res.shim = res->mmio_base + SDW_SHIM_BASE; link->res.alh = res->mmio_base + SDW_ALH_BASE; + link->res.ops = res->ops; + link->res.arg = res->arg; + memset(&pdevinfo, 0, sizeof(pdevinfo)); pdevinfo.parent = res->parent; diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c new file mode 100644 index 000000000000..8974a0fcda1b --- /dev/null +++ b/drivers/soundwire/stream.c @@ -0,0 +1,1479 @@ +// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) +// Copyright(c) 2015-18 Intel Corporation. + +/* + * stream.c - SoundWire Bus stream operations. + */ + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/slab.h> +#include <linux/soundwire/sdw_registers.h> +#include <linux/soundwire/sdw.h> +#include "bus.h" + +/* + * Array of supported rows and columns as per MIPI SoundWire Specification 1.1 + * + * The rows are arranged as per the array index value programmed + * in register. The index 15 has dummy value 0 in order to fill hole. 
+ */ +int rows[SDW_FRAME_ROWS] = {48, 50, 60, 64, 75, 80, 125, 147, + 96, 100, 120, 128, 150, 160, 250, 0, + 192, 200, 240, 256, 72, 144, 90, 180}; + +int cols[SDW_FRAME_COLS] = {2, 4, 6, 8, 10, 12, 14, 16}; + +static int sdw_find_col_index(int col) +{ + int i; + + for (i = 0; i < SDW_FRAME_COLS; i++) { + if (cols[i] == col) + return i; + } + + pr_warn("Requested column not found, selecting lowest column no: 2\n"); + return 0; +} + +static int sdw_find_row_index(int row) +{ + int i; + + for (i = 0; i < SDW_FRAME_ROWS; i++) { + if (rows[i] == row) + return i; + } + + pr_warn("Requested row not found, selecting lowest row no: 48\n"); + return 0; +} +static int _sdw_program_slave_port_params(struct sdw_bus *bus, + struct sdw_slave *slave, + struct sdw_transport_params *t_params, + enum sdw_dpn_type type) +{ + u32 addr1, addr2, addr3, addr4; + int ret; + u16 wbuf; + + if (bus->params.next_bank) { + addr1 = SDW_DPN_OFFSETCTRL2_B1(t_params->port_num); + addr2 = SDW_DPN_BLOCKCTRL3_B1(t_params->port_num); + addr3 = SDW_DPN_SAMPLECTRL2_B1(t_params->port_num); + addr4 = SDW_DPN_HCTRL_B1(t_params->port_num); + } else { + addr1 = SDW_DPN_OFFSETCTRL2_B0(t_params->port_num); + addr2 = SDW_DPN_BLOCKCTRL3_B0(t_params->port_num); + addr3 = SDW_DPN_SAMPLECTRL2_B0(t_params->port_num); + addr4 = SDW_DPN_HCTRL_B0(t_params->port_num); + } + + /* Program DPN_OffsetCtrl2 registers */ + ret = sdw_write(slave, addr1, t_params->offset2); + if (ret < 0) { + dev_err(bus->dev, "DPN_OffsetCtrl2 register write failed"); + return ret; + } + + /* Program DPN_BlockCtrl3 register */ + ret = sdw_write(slave, addr2, t_params->blk_pkg_mode); + if (ret < 0) { + dev_err(bus->dev, "DPN_BlockCtrl3 register write failed"); + return ret; + } + + /* + * Data ports are FULL, SIMPLE and REDUCED. 
This function handles + * FULL and REDUCED only and and beyond this point only FULL is + * handled, so bail out if we are not FULL data port type + */ + if (type != SDW_DPN_FULL) + return ret; + + /* Program DPN_SampleCtrl2 register */ + wbuf = (t_params->sample_interval - 1); + wbuf &= SDW_DPN_SAMPLECTRL_HIGH; + wbuf >>= SDW_REG_SHIFT(SDW_DPN_SAMPLECTRL_HIGH); + + ret = sdw_write(slave, addr3, wbuf); + if (ret < 0) { + dev_err(bus->dev, "DPN_SampleCtrl2 register write failed"); + return ret; + } + + /* Program DPN_HCtrl register */ + wbuf = t_params->hstart; + wbuf <<= SDW_REG_SHIFT(SDW_DPN_HCTRL_HSTART); + wbuf |= t_params->hstop; + + ret = sdw_write(slave, addr4, wbuf); + if (ret < 0) + dev_err(bus->dev, "DPN_HCtrl register write failed"); + + return ret; +} + +static int sdw_program_slave_port_params(struct sdw_bus *bus, + struct sdw_slave_runtime *s_rt, + struct sdw_port_runtime *p_rt) +{ + struct sdw_transport_params *t_params = &p_rt->transport_params; + struct sdw_port_params *p_params = &p_rt->port_params; + struct sdw_slave_prop *slave_prop = &s_rt->slave->prop; + u32 addr1, addr2, addr3, addr4, addr5, addr6; + struct sdw_dpn_prop *dpn_prop; + int ret; + u8 wbuf; + + dpn_prop = sdw_get_slave_dpn_prop(s_rt->slave, + s_rt->direction, + t_params->port_num); + if (!dpn_prop) + return -EINVAL; + + addr1 = SDW_DPN_PORTCTRL(t_params->port_num); + addr2 = SDW_DPN_BLOCKCTRL1(t_params->port_num); + + if (bus->params.next_bank) { + addr3 = SDW_DPN_SAMPLECTRL1_B1(t_params->port_num); + addr4 = SDW_DPN_OFFSETCTRL1_B1(t_params->port_num); + addr5 = SDW_DPN_BLOCKCTRL2_B1(t_params->port_num); + addr6 = SDW_DPN_LANECTRL_B1(t_params->port_num); + + } else { + addr3 = SDW_DPN_SAMPLECTRL1_B0(t_params->port_num); + addr4 = SDW_DPN_OFFSETCTRL1_B0(t_params->port_num); + addr5 = SDW_DPN_BLOCKCTRL2_B0(t_params->port_num); + addr6 = SDW_DPN_LANECTRL_B0(t_params->port_num); + } + + /* Program DPN_PortCtrl register */ + wbuf = p_params->data_mode << SDW_REG_SHIFT(SDW_DPN_PORTCTRL_DATAMODE); + wbuf |= p_params->flow_mode; + + ret = sdw_update(s_rt->slave, addr1, 0xF, wbuf); + if (ret < 0) { + dev_err(&s_rt->slave->dev, + "DPN_PortCtrl register write failed for port %d", + t_params->port_num); + return ret; + } + + /* Program DPN_BlockCtrl1 register */ + ret = sdw_write(s_rt->slave, addr2, (p_params->bps - 1)); + if (ret < 0) { + dev_err(&s_rt->slave->dev, + "DPN_BlockCtrl1 register write failed for port %d", + t_params->port_num); + return ret; + } + + /* Program DPN_SampleCtrl1 register */ + wbuf = (t_params->sample_interval - 1) & SDW_DPN_SAMPLECTRL_LOW; + ret = sdw_write(s_rt->slave, addr3, wbuf); + if (ret < 0) { + dev_err(&s_rt->slave->dev, + "DPN_SampleCtrl1 register write failed for port %d", + t_params->port_num); + return ret; + } + + /* Program DPN_OffsetCtrl1 registers */ + ret = sdw_write(s_rt->slave, addr4, t_params->offset1); + if (ret < 0) { + dev_err(&s_rt->slave->dev, + "DPN_OffsetCtrl1 register write failed for port %d", + t_params->port_num); + return ret; + } + + /* Program DPN_BlockCtrl2 register*/ + if (t_params->blk_grp_ctrl_valid) { + ret = sdw_write(s_rt->slave, addr5, t_params->blk_grp_ctrl); + if (ret < 0) { + dev_err(&s_rt->slave->dev, + "DPN_BlockCtrl2 reg write failed for port %d", + t_params->port_num); + return ret; + } + } + + /* program DPN_LaneCtrl register */ + if (slave_prop->lane_control_support) { + ret = sdw_write(s_rt->slave, addr6, t_params->lane_ctrl); + if (ret < 0) { + dev_err(&s_rt->slave->dev, + "DPN_LaneCtrl register write failed for port %d", + 
t_params->port_num); + return ret; + } + } + + if (dpn_prop->type != SDW_DPN_SIMPLE) { + ret = _sdw_program_slave_port_params(bus, s_rt->slave, + t_params, dpn_prop->type); + if (ret < 0) + dev_err(&s_rt->slave->dev, + "Transport reg write failed for port: %d", + t_params->port_num); + } + + return ret; +} + +static int sdw_program_master_port_params(struct sdw_bus *bus, + struct sdw_port_runtime *p_rt) +{ + int ret; + + /* + * We need to set transport and port parameters for the port. + * Transport parameters refer to the sample interval, offsets and + * hstart/stop etc. of the data. Port parameters refer to word + * length, flow mode etc. of the port + */ + ret = bus->port_ops->dpn_set_port_transport_params(bus, + &p_rt->transport_params, + bus->params.next_bank); + if (ret < 0) + return ret; + + return bus->port_ops->dpn_set_port_params(bus, + &p_rt->port_params, + bus->params.next_bank); +} + +/** + * sdw_program_port_params() - Programs transport parameters of Master(s) + * and Slave(s) + * + * @m_rt: Master stream runtime + */ +static int sdw_program_port_params(struct sdw_master_runtime *m_rt) +{ + struct sdw_slave_runtime *s_rt = NULL; + struct sdw_bus *bus = m_rt->bus; + struct sdw_port_runtime *p_rt; + int ret = 0; + + /* Program transport & port parameters for Slave(s) */ + list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) { + list_for_each_entry(p_rt, &s_rt->port_list, port_node) { + ret = sdw_program_slave_port_params(bus, s_rt, p_rt); + if (ret < 0) + return ret; + } + } + + /* Program transport & port parameters for Master(s) */ + list_for_each_entry(p_rt, &m_rt->port_list, port_node) { + ret = sdw_program_master_port_params(bus, p_rt); + if (ret < 0) + return ret; + } + + return 0; +} + +/** + * sdw_enable_disable_slave_ports() - Enable/disable slave data port + * + * @bus: bus instance + * @s_rt: slave runtime + * @p_rt: port runtime + * @en: enable or disable operation + * + * This function only sets the enable/disable bits in the relevant bank, the + * actual enable/disable is done with a bank switch + */ +static int sdw_enable_disable_slave_ports(struct sdw_bus *bus, + struct sdw_slave_runtime *s_rt, + struct sdw_port_runtime *p_rt, bool en) +{ + struct sdw_transport_params *t_params = &p_rt->transport_params; + u32 addr; + int ret; + + if (bus->params.next_bank) + addr = SDW_DPN_CHANNELEN_B1(p_rt->num); + else + addr = SDW_DPN_CHANNELEN_B0(p_rt->num); + + /* + * Since bus doesn't support sharing a port across two streams, + * it is safe to reset this register + */ + if (en) + ret = sdw_update(s_rt->slave, addr, 0xFF, p_rt->ch_mask); + else + ret = sdw_update(s_rt->slave, addr, 0xFF, 0x0); + + if (ret < 0) + dev_err(&s_rt->slave->dev, + "Slave chn_en reg write failed:%d port:%d", + ret, t_params->port_num); + + return ret; +} + +static int sdw_enable_disable_master_ports(struct sdw_master_runtime *m_rt, + struct sdw_port_runtime *p_rt, bool en) +{ + struct sdw_transport_params *t_params = &p_rt->transport_params; + struct sdw_bus *bus = m_rt->bus; + struct sdw_enable_ch enable_ch; + int ret = 0; + + enable_ch.port_num = p_rt->num; + enable_ch.ch_mask = p_rt->ch_mask; + enable_ch.enable = en; + + /* Perform Master port channel(s) enable/disable */ + if (bus->port_ops->dpn_port_enable_ch) { + ret = bus->port_ops->dpn_port_enable_ch(bus, + &enable_ch, bus->params.next_bank); + if (ret < 0) { + dev_err(bus->dev, + "Master chn_en write failed:%d port:%d", + ret, t_params->port_num); + return ret; + } + } else { + dev_err(bus->dev, + "dpn_port_enable_ch not supported, 
%s failed\n", + en ? "enable" : "disable"); + return -EINVAL; + } + + return 0; +} + +/** + * sdw_enable_disable_ports() - Enable/disable port(s) for Master and + * Slave(s) + * + * @m_rt: Master stream runtime + * @en: mode (enable/disable) + */ +static int sdw_enable_disable_ports(struct sdw_master_runtime *m_rt, bool en) +{ + struct sdw_port_runtime *s_port, *m_port; + struct sdw_slave_runtime *s_rt = NULL; + int ret = 0; + + /* Enable/Disable Slave port(s) */ + list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) { + list_for_each_entry(s_port, &s_rt->port_list, port_node) { + ret = sdw_enable_disable_slave_ports(m_rt->bus, s_rt, + s_port, en); + if (ret < 0) + return ret; + } + } + + /* Enable/Disable Master port(s) */ + list_for_each_entry(m_port, &m_rt->port_list, port_node) { + ret = sdw_enable_disable_master_ports(m_rt, m_port, en); + if (ret < 0) + return ret; + } + + return 0; +} + +static int sdw_do_port_prep(struct sdw_slave_runtime *s_rt, + struct sdw_prepare_ch prep_ch, enum sdw_port_prep_ops cmd) +{ + const struct sdw_slave_ops *ops = s_rt->slave->ops; + int ret; + + if (ops->port_prep) { + ret = ops->port_prep(s_rt->slave, &prep_ch, cmd); + if (ret < 0) { + dev_err(&s_rt->slave->dev, + "Slave Port Prep cmd %d failed: %d", cmd, ret); + return ret; + } + } + + return 0; +} + +static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus, + struct sdw_slave_runtime *s_rt, + struct sdw_port_runtime *p_rt, bool prep) +{ + struct completion *port_ready = NULL; + struct sdw_dpn_prop *dpn_prop; + struct sdw_prepare_ch prep_ch; + unsigned int time_left; + bool intr = false; + int ret = 0, val; + u32 addr; + + prep_ch.num = p_rt->num; + prep_ch.ch_mask = p_rt->ch_mask; + + dpn_prop = sdw_get_slave_dpn_prop(s_rt->slave, + s_rt->direction, + prep_ch.num); + if (!dpn_prop) { + dev_err(bus->dev, + "Slave Port:%d properties not found", prep_ch.num); + return -EINVAL; + } + + prep_ch.prepare = prep; + + prep_ch.bank = bus->params.next_bank; + + if (dpn_prop->device_interrupts || !dpn_prop->simple_ch_prep_sm) + intr = true; + + /* + * Enable interrupt before Port prepare. 
+ * For Port de-prepare, it is assumed that port + * was prepared earlier + */ + if (prep && intr) { + ret = sdw_configure_dpn_intr(s_rt->slave, p_rt->num, prep, + dpn_prop->device_interrupts); + if (ret < 0) + return ret; + } + + /* Inform slave about the impending port prepare */ + sdw_do_port_prep(s_rt, prep_ch, SDW_OPS_PORT_PRE_PREP); + + /* Prepare Slave port implementing CP_SM */ + if (!dpn_prop->simple_ch_prep_sm) { + addr = SDW_DPN_PREPARECTRL(p_rt->num); + + if (prep) + ret = sdw_update(s_rt->slave, addr, + 0xFF, p_rt->ch_mask); + else + ret = sdw_update(s_rt->slave, addr, 0xFF, 0x0); + + if (ret < 0) { + dev_err(&s_rt->slave->dev, + "Slave prep_ctrl reg write failed"); + return ret; + } + + /* Wait for completion on port ready */ + port_ready = &s_rt->slave->port_ready[prep_ch.num]; + time_left = wait_for_completion_timeout(port_ready, + msecs_to_jiffies(dpn_prop->ch_prep_timeout)); + + val = sdw_read(s_rt->slave, SDW_DPN_PREPARESTATUS(p_rt->num)); + val &= p_rt->ch_mask; + if (!time_left || val) { + dev_err(&s_rt->slave->dev, + "Chn prep failed for port:%d", prep_ch.num); + return -ETIMEDOUT; + } + } + + /* Inform slaves about ports prepared */ + sdw_do_port_prep(s_rt, prep_ch, SDW_OPS_PORT_POST_PREP); + + /* Disable interrupt after Port de-prepare */ + if (!prep && intr) + ret = sdw_configure_dpn_intr(s_rt->slave, p_rt->num, prep, + dpn_prop->device_interrupts); + + return ret; +} + +static int sdw_prep_deprep_master_ports(struct sdw_master_runtime *m_rt, + struct sdw_port_runtime *p_rt, bool prep) +{ + struct sdw_transport_params *t_params = &p_rt->transport_params; + struct sdw_bus *bus = m_rt->bus; + const struct sdw_master_port_ops *ops = bus->port_ops; + struct sdw_prepare_ch prep_ch; + int ret = 0; + + prep_ch.num = p_rt->num; + prep_ch.ch_mask = p_rt->ch_mask; + prep_ch.prepare = prep; /* Prepare/De-prepare */ + prep_ch.bank = bus->params.next_bank; + + /* Pre-prepare/Pre-deprepare port(s) */ + if (ops->dpn_port_prep) { + ret = ops->dpn_port_prep(bus, &prep_ch); + if (ret < 0) { + dev_err(bus->dev, "Port prepare failed for port:%d", + t_params->port_num); + return ret; + } + } + + return ret; +} + +/** + * sdw_prep_deprep_ports() - Prepare/De-prepare port(s) for Master(s) and + * Slave(s) + * + * @m_rt: Master runtime handle + * @prep: Prepare or De-prepare + */ +static int sdw_prep_deprep_ports(struct sdw_master_runtime *m_rt, bool prep) +{ + struct sdw_slave_runtime *s_rt = NULL; + struct sdw_port_runtime *p_rt; + int ret = 0; + + /* Prepare/De-prepare Slave port(s) */ + list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) { + list_for_each_entry(p_rt, &s_rt->port_list, port_node) { + ret = sdw_prep_deprep_slave_ports(m_rt->bus, s_rt, + p_rt, prep); + if (ret < 0) + return ret; + } + } + + /* Prepare/De-prepare Master port(s) */ + list_for_each_entry(p_rt, &m_rt->port_list, port_node) { + ret = sdw_prep_deprep_master_ports(m_rt, p_rt, prep); + if (ret < 0) + return ret; + } + + return ret; +} + +/** + * sdw_notify_config() - Notify bus configuration + * + * @m_rt: Master runtime handle + * + * This function notifies the Master(s) and Slave(s) of the + * new bus configuration. 
+ */ +static int sdw_notify_config(struct sdw_master_runtime *m_rt) +{ + struct sdw_slave_runtime *s_rt; + struct sdw_bus *bus = m_rt->bus; + struct sdw_slave *slave; + int ret = 0; + + if (bus->ops->set_bus_conf) { + ret = bus->ops->set_bus_conf(bus, &bus->params); + if (ret < 0) + return ret; + } + + list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) { + slave = s_rt->slave; + + if (slave->ops->bus_config) { + ret = slave->ops->bus_config(slave, &bus->params); + if (ret < 0) { + dev_err(bus->dev, "Notify Slave: %d failed", + slave->dev_num); + return ret; + } + } + } + + return ret; +} + +/** + * sdw_program_params() - Program transport and port parameters for Master(s) + * and Slave(s) + * + * @bus: SDW bus instance + */ +static int sdw_program_params(struct sdw_bus *bus) +{ + struct sdw_master_runtime *m_rt = NULL; + int ret = 0; + + list_for_each_entry(m_rt, &bus->m_rt_list, bus_node) { + ret = sdw_program_port_params(m_rt); + if (ret < 0) { + dev_err(bus->dev, + "Program transport params failed: %d", ret); + return ret; + } + + ret = sdw_notify_config(m_rt); + if (ret < 0) { + dev_err(bus->dev, "Notify bus config failed: %d", ret); + return ret; + } + + /* Enable port(s) on alternate bank for all active streams */ + if (m_rt->stream->state != SDW_STREAM_ENABLED) + continue; + + ret = sdw_enable_disable_ports(m_rt, true); + if (ret < 0) { + dev_err(bus->dev, "Enable channel failed: %d", ret); + return ret; + } + } + + return ret; +} + +static int sdw_bank_switch(struct sdw_bus *bus) +{ + int col_index, row_index; + struct sdw_msg *wr_msg; + u8 *wbuf = NULL; + int ret = 0; + u16 addr; + + wr_msg = kzalloc(sizeof(*wr_msg), GFP_KERNEL); + if (!wr_msg) + return -ENOMEM; + + wbuf = kzalloc(sizeof(*wbuf), GFP_KERNEL); + if (!wbuf) { + ret = -ENOMEM; + goto error_1; + } + + /* Get row and column index to program register */ + col_index = sdw_find_col_index(bus->params.col); + row_index = sdw_find_row_index(bus->params.row); + wbuf[0] = col_index | (row_index << 3); + + if (bus->params.next_bank) + addr = SDW_SCP_FRAMECTRL_B1; + else + addr = SDW_SCP_FRAMECTRL_B0; + + sdw_fill_msg(wr_msg, NULL, addr, 1, SDW_BROADCAST_DEV_NUM, + SDW_MSG_FLAG_WRITE, wbuf); + wr_msg->ssp_sync = true; + + ret = sdw_transfer(bus, wr_msg); + if (ret < 0) { + dev_err(bus->dev, "Slave frame_ctrl reg write failed"); + goto error; + } + + kfree(wr_msg); + kfree(wbuf); + bus->defer_msg.msg = NULL; + bus->params.curr_bank = !bus->params.curr_bank; + bus->params.next_bank = !bus->params.next_bank; + + return 0; + +error: + kfree(wbuf); +error_1: + kfree(wr_msg); + return ret; +} + +static int do_bank_switch(struct sdw_stream_runtime *stream) +{ + struct sdw_master_runtime *m_rt = stream->m_rt; + const struct sdw_master_ops *ops; + struct sdw_bus *bus = m_rt->bus; + int ret = 0; + + ops = bus->ops; + + /* Pre-bank switch */ + if (ops->pre_bank_switch) { + ret = ops->pre_bank_switch(bus); + if (ret < 0) { + dev_err(bus->dev, "Pre bank switch op failed: %d", ret); + return ret; + } + } + + /* Bank switch */ + ret = sdw_bank_switch(bus); + if (ret < 0) { + dev_err(bus->dev, "Bank switch failed: %d", ret); + return ret; + } + + /* Post-bank switch */ + if (ops->post_bank_switch) { + ret = ops->post_bank_switch(bus); + if (ret < 0) { + dev_err(bus->dev, + "Post bank switch op failed: %d", ret); + } + } + + return ret; +} + +/** + * sdw_release_stream() - Free the assigned stream runtime + * + * @stream: SoundWire stream runtime + * + * sdw_release_stream should be called only once per stream + */ +void 
sdw_release_stream(struct sdw_stream_runtime *stream) +{ + kfree(stream); +} +EXPORT_SYMBOL(sdw_release_stream); + +/** + * sdw_alloc_stream() - Allocate and return stream runtime + * + * @stream_name: SoundWire stream name + * + * Allocates a SoundWire stream runtime instance. + * sdw_alloc_stream should be called only once per stream. Typically + * invoked from ALSA/ASoC machine/platform driver. + */ +struct sdw_stream_runtime *sdw_alloc_stream(char *stream_name) +{ + struct sdw_stream_runtime *stream; + + stream = kzalloc(sizeof(*stream), GFP_KERNEL); + if (!stream) + return NULL; + + stream->name = stream_name; + stream->state = SDW_STREAM_ALLOCATED; + + return stream; +} +EXPORT_SYMBOL(sdw_alloc_stream); + +/** + * sdw_alloc_master_rt() - Allocates and initialize Master runtime handle + * + * @bus: SDW bus instance + * @stream_config: Stream configuration + * @stream: Stream runtime handle. + * + * This function is to be called with bus_lock held. + */ +static struct sdw_master_runtime +*sdw_alloc_master_rt(struct sdw_bus *bus, + struct sdw_stream_config *stream_config, + struct sdw_stream_runtime *stream) +{ + struct sdw_master_runtime *m_rt; + + m_rt = stream->m_rt; + + /* + * check if Master is already allocated (as a result of Slave adding + * it first), if so skip allocation and go to configure + */ + if (m_rt) + goto stream_config; + + m_rt = kzalloc(sizeof(*m_rt), GFP_KERNEL); + if (!m_rt) + return NULL; + + /* Initialization of Master runtime handle */ + INIT_LIST_HEAD(&m_rt->port_list); + INIT_LIST_HEAD(&m_rt->slave_rt_list); + stream->m_rt = m_rt; + + list_add_tail(&m_rt->bus_node, &bus->m_rt_list); + +stream_config: + m_rt->ch_count = stream_config->ch_count; + m_rt->bus = bus; + m_rt->stream = stream; + m_rt->direction = stream_config->direction; + + return m_rt; +} + +/** + * sdw_alloc_slave_rt() - Allocate and initialize Slave runtime handle. + * + * @slave: Slave handle + * @stream_config: Stream configuration + * @stream: Stream runtime handle + * + * This function is to be called with bus_lock held. + */ +static struct sdw_slave_runtime +*sdw_alloc_slave_rt(struct sdw_slave *slave, + struct sdw_stream_config *stream_config, + struct sdw_stream_runtime *stream) +{ + struct sdw_slave_runtime *s_rt = NULL; + + s_rt = kzalloc(sizeof(*s_rt), GFP_KERNEL); + if (!s_rt) + return NULL; + + INIT_LIST_HEAD(&s_rt->port_list); + s_rt->ch_count = stream_config->ch_count; + s_rt->direction = stream_config->direction; + s_rt->slave = slave; + + return s_rt; +} + +static void sdw_master_port_release(struct sdw_bus *bus, + struct sdw_master_runtime *m_rt) +{ + struct sdw_port_runtime *p_rt, *_p_rt; + + list_for_each_entry_safe(p_rt, _p_rt, + &m_rt->port_list, port_node) { + list_del(&p_rt->port_node); + kfree(p_rt); + } +} + +static void sdw_slave_port_release(struct sdw_bus *bus, + struct sdw_slave *slave, + struct sdw_stream_runtime *stream) +{ + struct sdw_port_runtime *p_rt, *_p_rt; + struct sdw_master_runtime *m_rt = stream->m_rt; + struct sdw_slave_runtime *s_rt; + + list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) { + if (s_rt->slave != slave) + continue; + + list_for_each_entry_safe(p_rt, _p_rt, + &s_rt->port_list, port_node) { + list_del(&p_rt->port_node); + kfree(p_rt); + } + } +} + +/** + * sdw_release_slave_stream() - Free Slave(s) runtime handle + * + * @slave: Slave handle. + * @stream: Stream runtime handle. + * + * This function is to be called with bus_lock held. 
+ */ +static void sdw_release_slave_stream(struct sdw_slave *slave, + struct sdw_stream_runtime *stream) +{ + struct sdw_slave_runtime *s_rt, *_s_rt; + struct sdw_master_runtime *m_rt = stream->m_rt; + + /* Retrieve Slave runtime handle */ + list_for_each_entry_safe(s_rt, _s_rt, + &m_rt->slave_rt_list, m_rt_node) { + + if (s_rt->slave == slave) { + list_del(&s_rt->m_rt_node); + kfree(s_rt); + return; + } + } +} + +/** + * sdw_release_master_stream() - Free Master runtime handle + * + * @stream: Stream runtime handle. + * + * This function is to be called with bus_lock held. + * It frees the Master runtime handle and associated Slave(s) runtime + * handle. If this is called first then sdw_release_slave_stream() will have + * no effect as Slave(s) runtime handle would already be freed up. + */ +static void sdw_release_master_stream(struct sdw_stream_runtime *stream) +{ + struct sdw_master_runtime *m_rt = stream->m_rt; + struct sdw_slave_runtime *s_rt, *_s_rt; + + list_for_each_entry_safe(s_rt, _s_rt, + &m_rt->slave_rt_list, m_rt_node) { + sdw_slave_port_release(s_rt->slave->bus, s_rt->slave, stream); + sdw_release_slave_stream(s_rt->slave, stream); + } + + list_del(&m_rt->bus_node); +} + +/** + * sdw_stream_remove_master() - Remove master from sdw_stream + * + * @bus: SDW Bus instance + * @stream: SoundWire stream + * + * This removes and frees port_rt and master_rt from a stream + */ +int sdw_stream_remove_master(struct sdw_bus *bus, + struct sdw_stream_runtime *stream) +{ + mutex_lock(&bus->bus_lock); + + sdw_release_master_stream(stream); + sdw_master_port_release(bus, stream->m_rt); + stream->state = SDW_STREAM_RELEASED; + kfree(stream->m_rt); + stream->m_rt = NULL; + + mutex_unlock(&bus->bus_lock); + + return 0; +} +EXPORT_SYMBOL(sdw_stream_remove_master); + +/** + * sdw_stream_remove_slave() - Remove slave from sdw_stream + * + * @slave: SDW Slave instance + * @stream: SoundWire stream + * + * This removes and frees port_rt and slave_rt from a stream + */ +int sdw_stream_remove_slave(struct sdw_slave *slave, + struct sdw_stream_runtime *stream) +{ + mutex_lock(&slave->bus->bus_lock); + + sdw_slave_port_release(slave->bus, slave, stream); + sdw_release_slave_stream(slave, stream); + + mutex_unlock(&slave->bus->bus_lock); + + return 0; +} +EXPORT_SYMBOL(sdw_stream_remove_slave); + +/** + * sdw_config_stream() - Configure the allocated stream + * + * @dev: SDW device + * @stream: SoundWire stream + * @stream_config: Stream configuration for audio stream + * @is_slave: is API called from Slave or Master + * + * This function is to be called with bus_lock held. + */ +static int sdw_config_stream(struct device *dev, + struct sdw_stream_runtime *stream, + struct sdw_stream_config *stream_config, bool is_slave) +{ + /* + * Update the stream rate, channel and bps based on data + * source. For more than one data source (multilink), + * match the rate, bps, stream type and increment number of channels. 
+ * + * If rate/bps is zero, it means the values are not set, so skip + * comparison and allow the value to be set and stored in stream + */ + if (stream->params.rate && + stream->params.rate != stream_config->frame_rate) { + dev_err(dev, "rate not matching, stream:%s", stream->name); + return -EINVAL; + } + + if (stream->params.bps && + stream->params.bps != stream_config->bps) { + dev_err(dev, "bps not matching, stream:%s", stream->name); + return -EINVAL; + } + + stream->type = stream_config->type; + stream->params.rate = stream_config->frame_rate; + stream->params.bps = stream_config->bps; + + /* TODO: Update this check during Device-device support */ + if (is_slave) + stream->params.ch_count += stream_config->ch_count; + + return 0; +} + +static int sdw_is_valid_port_range(struct device *dev, + struct sdw_port_runtime *p_rt) +{ + if (!SDW_VALID_PORT_RANGE(p_rt->num)) { + dev_err(dev, + "SoundWire: Invalid port number :%d", p_rt->num); + return -EINVAL; + } + + return 0; +} + +static struct sdw_port_runtime *sdw_port_alloc(struct device *dev, + struct sdw_port_config *port_config, + int port_index) +{ + struct sdw_port_runtime *p_rt; + + p_rt = kzalloc(sizeof(*p_rt), GFP_KERNEL); + if (!p_rt) + return NULL; + + p_rt->ch_mask = port_config[port_index].ch_mask; + p_rt->num = port_config[port_index].num; + + return p_rt; +} + +static int sdw_master_port_config(struct sdw_bus *bus, + struct sdw_master_runtime *m_rt, + struct sdw_port_config *port_config, + unsigned int num_ports) +{ + struct sdw_port_runtime *p_rt; + int i; + + /* Iterate for number of ports to perform initialization */ + for (i = 0; i < num_ports; i++) { + p_rt = sdw_port_alloc(bus->dev, port_config, i); + if (!p_rt) + return -ENOMEM; + + /* + * TODO: Check port capabilities for requested + * configuration (audio mode support) + */ + + list_add_tail(&p_rt->port_node, &m_rt->port_list); + } + + return 0; +} + +static int sdw_slave_port_config(struct sdw_slave *slave, + struct sdw_slave_runtime *s_rt, + struct sdw_port_config *port_config, + unsigned int num_config) +{ + struct sdw_port_runtime *p_rt; + int i, ret; + + /* Iterate for number of ports to perform initialization */ + for (i = 0; i < num_config; i++) { + p_rt = sdw_port_alloc(&slave->dev, port_config, i); + if (!p_rt) + return -ENOMEM; + + /* + * TODO: Check valid port range as defined by DisCo/ + * slave + */ + ret = sdw_is_valid_port_range(&slave->dev, p_rt); + if (ret < 0) { + kfree(p_rt); + return ret; + } + + /* + * TODO: Check port capabilities for requested + * configuration (audio mode support) + */ + + list_add_tail(&p_rt->port_node, &s_rt->port_list); + } + + return 0; +} + +/** + * sdw_stream_add_master() - Allocate and add master runtime to a stream + * + * @bus: SDW Bus instance + * @stream_config: Stream configuration for audio stream + * @port_config: Port configuration for audio stream + * @num_ports: Number of ports + * @stream: SoundWire stream + */ +int sdw_stream_add_master(struct sdw_bus *bus, + struct sdw_stream_config *stream_config, + struct sdw_port_config *port_config, + unsigned int num_ports, + struct sdw_stream_runtime *stream) +{ + struct sdw_master_runtime *m_rt = NULL; + int ret; + + mutex_lock(&bus->bus_lock); + + m_rt = sdw_alloc_master_rt(bus, stream_config, stream); + if (!m_rt) { + dev_err(bus->dev, + "Master runtime config failed for stream:%s", + stream->name); + ret = -ENOMEM; + goto error; + } + + ret = sdw_config_stream(bus->dev, stream, stream_config, false); + if (ret) + goto stream_error; + + ret = 
sdw_master_port_config(bus, m_rt, port_config, num_ports); + if (ret) + goto stream_error; + + stream->state = SDW_STREAM_CONFIGURED; + goto error; + +stream_error: + sdw_release_master_stream(stream); +error: + mutex_unlock(&bus->bus_lock); + return ret; +} +EXPORT_SYMBOL(sdw_stream_add_master); + +/** + * sdw_stream_add_slave() - Allocate and add master/slave runtime to a stream + * + * @slave: SDW Slave instance + * @stream_config: Stream configuration for audio stream + * @stream: SoundWire stream + * @port_config: Port configuration for audio stream + * @num_ports: Number of ports + */ +int sdw_stream_add_slave(struct sdw_slave *slave, + struct sdw_stream_config *stream_config, + struct sdw_port_config *port_config, + unsigned int num_ports, + struct sdw_stream_runtime *stream) +{ + struct sdw_slave_runtime *s_rt; + struct sdw_master_runtime *m_rt; + int ret; + + mutex_lock(&slave->bus->bus_lock); + + /* + * If this API is invoked by Slave first then m_rt is not valid. + * So, allocate m_rt and add Slave to it. + */ + m_rt = sdw_alloc_master_rt(slave->bus, stream_config, stream); + if (!m_rt) { + dev_err(&slave->dev, + "alloc master runtime failed for stream:%s", + stream->name); + ret = -ENOMEM; + goto error; + } + + s_rt = sdw_alloc_slave_rt(slave, stream_config, stream); + if (!s_rt) { + dev_err(&slave->dev, + "Slave runtime config failed for stream:%s", + stream->name); + ret = -ENOMEM; + goto stream_error; + } + + ret = sdw_config_stream(&slave->dev, stream, stream_config, true); + if (ret) + goto stream_error; + + list_add_tail(&s_rt->m_rt_node, &m_rt->slave_rt_list); + + ret = sdw_slave_port_config(slave, s_rt, port_config, num_ports); + if (ret) + goto stream_error; + + stream->state = SDW_STREAM_CONFIGURED; + goto error; + +stream_error: + /* + * We hit an error, so clean up the stream: release all Slave(s) and + * the Master runtime + */ + sdw_release_master_stream(stream); +error: + mutex_unlock(&slave->bus->bus_lock); + return ret; +} +EXPORT_SYMBOL(sdw_stream_add_slave); + +/** + * sdw_get_slave_dpn_prop() - Get Slave port capabilities + * + * @slave: Slave handle + * @direction: Data direction. 
+ * @port_num: Port number + */ +struct sdw_dpn_prop *sdw_get_slave_dpn_prop(struct sdw_slave *slave, + enum sdw_data_direction direction, + unsigned int port_num) +{ + struct sdw_dpn_prop *dpn_prop; + u8 num_ports; + int i; + + if (direction == SDW_DATA_DIR_TX) { + num_ports = hweight32(slave->prop.source_ports); + dpn_prop = slave->prop.src_dpn_prop; + } else { + num_ports = hweight32(slave->prop.sink_ports); + dpn_prop = slave->prop.sink_dpn_prop; + } + + for (i = 0; i < num_ports; i++) { + if (dpn_prop[i].num == port_num) + return &dpn_prop[i]; + } + + return NULL; +} + +static int _sdw_prepare_stream(struct sdw_stream_runtime *stream) +{ + struct sdw_master_runtime *m_rt = stream->m_rt; + struct sdw_bus *bus = m_rt->bus; + struct sdw_master_prop *prop = NULL; + struct sdw_bus_params params; + int ret; + + prop = &bus->prop; + memcpy(&params, &bus->params, sizeof(params)); + + /* TODO: Support Asynchronous mode */ + if ((prop->max_freq % stream->params.rate) != 0) { + dev_err(bus->dev, "Async mode not supported"); + return -EINVAL; + } + + /* Increment cumulative bus bandwidth */ + /* TODO: Update this during Device-Device support */ + bus->params.bandwidth += m_rt->stream->params.rate * + m_rt->ch_count * m_rt->stream->params.bps; + + /* Program params */ + ret = sdw_program_params(bus); + if (ret < 0) { + dev_err(bus->dev, "Program params failed: %d", ret); + goto restore_params; + } + + ret = do_bank_switch(stream); + if (ret < 0) { + dev_err(bus->dev, "Bank switch failed: %d", ret); + goto restore_params; + } + + /* Prepare port(s) on the new clock configuration */ + ret = sdw_prep_deprep_ports(m_rt, true); + if (ret < 0) { + dev_err(bus->dev, "Prepare port(s) failed ret = %d", + ret); + return ret; + } + + stream->state = SDW_STREAM_PREPARED; + + return ret; + +restore_params: + memcpy(&bus->params, &params, sizeof(params)); + return ret; +} + +/** + * sdw_prepare_stream() - Prepare SoundWire stream + * + * @stream: Soundwire stream + * + * Documentation/soundwire/stream.txt explains this API in detail + */ +int sdw_prepare_stream(struct sdw_stream_runtime *stream) +{ + int ret = 0; + + if (!stream) { + pr_err("SoundWire: Handle not found for stream"); + return -EINVAL; + } + + mutex_lock(&stream->m_rt->bus->bus_lock); + + ret = _sdw_prepare_stream(stream); + if (ret < 0) + pr_err("Prepare for stream:%s failed: %d", stream->name, ret); + + mutex_unlock(&stream->m_rt->bus->bus_lock); + return ret; +} +EXPORT_SYMBOL(sdw_prepare_stream); + +static int _sdw_enable_stream(struct sdw_stream_runtime *stream) +{ + struct sdw_master_runtime *m_rt = stream->m_rt; + struct sdw_bus *bus = m_rt->bus; + int ret; + + /* Program params */ + ret = sdw_program_params(bus); + if (ret < 0) { + dev_err(bus->dev, "Program params failed: %d", ret); + return ret; + } + + /* Enable port(s) */ + ret = sdw_enable_disable_ports(m_rt, true); + if (ret < 0) { + dev_err(bus->dev, "Enable port(s) failed ret: %d", ret); + return ret; + } + + ret = do_bank_switch(stream); + if (ret < 0) { + dev_err(bus->dev, "Bank switch failed: %d", ret); + return ret; + } + + stream->state = SDW_STREAM_ENABLED; + return 0; +} + +/** + * sdw_enable_stream() - Enable SoundWire stream + * + * @stream: Soundwire stream + * + * Documentation/soundwire/stream.txt explains this API in detail + */ +int sdw_enable_stream(struct sdw_stream_runtime *stream) +{ + int ret = 0; + + if (!stream) { + pr_err("SoundWire: Handle not found for stream"); + return -EINVAL; + } + + mutex_lock(&stream->m_rt->bus->bus_lock); + + ret = 
_sdw_enable_stream(stream); + if (ret < 0) + pr_err("Enable for stream:%s failed: %d", stream->name, ret); + + mutex_unlock(&stream->m_rt->bus->bus_lock); + return ret; +} +EXPORT_SYMBOL(sdw_enable_stream); + +static int _sdw_disable_stream(struct sdw_stream_runtime *stream) +{ + struct sdw_master_runtime *m_rt = stream->m_rt; + struct sdw_bus *bus = m_rt->bus; + int ret; + + /* Disable port(s) */ + ret = sdw_enable_disable_ports(m_rt, false); + if (ret < 0) { + dev_err(bus->dev, "Disable port(s) failed: %d", ret); + return ret; + } + + stream->state = SDW_STREAM_DISABLED; + + /* Program params */ + ret = sdw_program_params(bus); + if (ret < 0) { + dev_err(bus->dev, "Program params failed: %d", ret); + return ret; + } + + return do_bank_switch(stream); +} + +/** + * sdw_disable_stream() - Disable SoundWire stream + * + * @stream: Soundwire stream + * + * Documentation/soundwire/stream.txt explains this API in detail + */ +int sdw_disable_stream(struct sdw_stream_runtime *stream) +{ + int ret = 0; + + if (!stream) { + pr_err("SoundWire: Handle not found for stream"); + return -EINVAL; + } + + mutex_lock(&stream->m_rt->bus->bus_lock); + + ret = _sdw_disable_stream(stream); + if (ret < 0) + pr_err("Disable for stream:%s failed: %d", stream->name, ret); + + mutex_unlock(&stream->m_rt->bus->bus_lock); + return ret; +} +EXPORT_SYMBOL(sdw_disable_stream); + +static int _sdw_deprepare_stream(struct sdw_stream_runtime *stream) +{ + struct sdw_master_runtime *m_rt = stream->m_rt; + struct sdw_bus *bus = m_rt->bus; + int ret = 0; + + /* De-prepare port(s) */ + ret = sdw_prep_deprep_ports(m_rt, false); + if (ret < 0) { + dev_err(bus->dev, "De-prepare port(s) failed: %d", ret); + return ret; + } + + stream->state = SDW_STREAM_DEPREPARED; + + /* TODO: Update this during Device-Device support */ + bus->params.bandwidth -= m_rt->stream->params.rate * + m_rt->ch_count * m_rt->stream->params.bps; + + /* Program params */ + ret = sdw_program_params(bus); + if (ret < 0) { + dev_err(bus->dev, "Program params failed: %d", ret); + return ret; + } + + return do_bank_switch(stream); +} + +/** + * sdw_deprepare_stream() - Deprepare SoundWire stream + * + * @stream: Soundwire stream + * + * Documentation/soundwire/stream.txt explains this API in detail + */ +int sdw_deprepare_stream(struct sdw_stream_runtime *stream) +{ + int ret = 0; + + if (!stream) { + pr_err("SoundWire: Handle not found for stream"); + return -EINVAL; + } + + mutex_lock(&stream->m_rt->bus->bus_lock); + + ret = _sdw_deprepare_stream(stream); + if (ret < 0) + pr_err("De-prepare for stream:%s failed: %d", stream->name, ret); + + mutex_unlock(&stream->m_rt->bus->bus_lock); + return ret; +} +EXPORT_SYMBOL(sdw_deprepare_stream); diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index fd4848392e0d..e8f4ac9400ea 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c @@ -270,7 +270,7 @@ static int uio_dev_add_attributes(struct uio_device *idev) if (!map_found) { map_found = 1; idev->map_dir = kobject_create_and_add("maps", - &idev->dev->kobj); + &idev->dev.kobj); if (!idev->map_dir) { ret = -ENOMEM; goto err_map; @@ -299,7 +299,7 @@ static int uio_dev_add_attributes(struct uio_device *idev) if (!portio_found) { portio_found = 1; idev->portio_dir = kobject_create_and_add("portio", - &idev->dev->kobj); + &idev->dev.kobj); if (!idev->portio_dir) { ret = -ENOMEM; goto err_portio; @@ -342,7 +342,7 @@ err_map_kobj: kobject_put(&map->kobj); } kobject_put(idev->map_dir); - dev_err(idev->dev, "error creating sysfs files (%d)\n", ret); + dev_err(&idev->dev, "error 
creating sysfs files (%d)\n", ret); return ret; } @@ -379,7 +379,7 @@ static int uio_get_minor(struct uio_device *idev) idev->minor = retval; retval = 0; } else if (retval == -ENOSPC) { - dev_err(idev->dev, "too many uio devices\n"); + dev_err(&idev->dev, "too many uio devices\n"); retval = -EINVAL; } mutex_unlock(&minor_lock); @@ -433,6 +433,7 @@ static int uio_open(struct inode *inode, struct file *filep) struct uio_device *idev; struct uio_listener *listener; int ret = 0; + unsigned long flags; mutex_lock(&minor_lock); idev = idr_find(&uio_idr, iminor(inode)); @@ -442,9 +443,11 @@ static int uio_open(struct inode *inode, struct file *filep) goto out; } + get_device(&idev->dev); + if (!try_module_get(idev->owner)) { ret = -ENODEV; - goto out; + goto err_module_get; } listener = kmalloc(sizeof(*listener), GFP_KERNEL); @@ -457,11 +460,13 @@ static int uio_open(struct inode *inode, struct file *filep) listener->event_count = atomic_read(&idev->event); filep->private_data = listener; - if (idev->info->open) { + spin_lock_irqsave(&idev->info_lock, flags); + if (idev->info && idev->info->open) ret = idev->info->open(idev->info, inode); - if (ret) - goto err_infoopen; - } + spin_unlock_irqrestore(&idev->info_lock, flags); + if (ret) + goto err_infoopen; + return 0; err_infoopen: @@ -470,6 +475,9 @@ err_infoopen: err_alloc_listener: module_put(idev->owner); +err_module_get: + put_device(&idev->dev); + out: return ret; } @@ -487,12 +495,16 @@ static int uio_release(struct inode *inode, struct file *filep) int ret = 0; struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; + unsigned long flags; - if (idev->info->release) + spin_lock_irqsave(&idev->info_lock, flags); + if (idev->info && idev->info->release) ret = idev->info->release(idev->info, inode); + spin_unlock_irqrestore(&idev->info_lock, flags); module_put(idev->owner); kfree(listener); + put_device(&idev->dev); return ret; } @@ -500,9 +512,16 @@ static __poll_t uio_poll(struct file *filep, poll_table *wait) { struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; + __poll_t ret = 0; + unsigned long flags; - if (!idev->info->irq) - return -EIO; + spin_lock_irqsave(&idev->info_lock, flags); + if (!idev->info || !idev->info->irq) + ret = -EIO; + spin_unlock_irqrestore(&idev->info_lock, flags); + + if (ret) + return ret; poll_wait(filep, &idev->wait, wait); if (listener->event_count != atomic_read(&idev->event)) @@ -516,11 +535,17 @@ static ssize_t uio_read(struct file *filep, char __user *buf, struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; DECLARE_WAITQUEUE(wait, current); - ssize_t retval; + ssize_t retval = 0; s32 event_count; + unsigned long flags; - if (!idev->info->irq) - return -EIO; + spin_lock_irqsave(&idev->info_lock, flags); + if (!idev->info || !idev->info->irq) + retval = -EIO; + spin_unlock_irqrestore(&idev->info_lock, flags); + + if (retval) + return retval; if (count != sizeof(s32)) return -EINVAL; @@ -567,21 +592,33 @@ static ssize_t uio_write(struct file *filep, const char __user *buf, struct uio_device *idev = listener->dev; ssize_t retval; s32 irq_on; + unsigned long flags; - if (!idev->info->irq) - return -EIO; + spin_lock_irqsave(&idev->info_lock, flags); + if (!idev->info || !idev->info->irq) { + retval = -EIO; + goto out; + } - if (count != sizeof(s32)) - return -EINVAL; + if (count != sizeof(s32)) { + retval = -EINVAL; + goto out; + } - if (!idev->info->irqcontrol) - return -ENOSYS; + if 
(!idev->info->irqcontrol) { + retval = -ENOSYS; + goto out; + } - if (copy_from_user(&irq_on, buf, count)) - return -EFAULT; + if (copy_from_user(&irq_on, buf, count)) { + retval = -EFAULT; + goto out; + } retval = idev->info->irqcontrol(idev->info, irq_on); +out: + spin_unlock_irqrestore(&idev->info_lock, flags); return retval ? retval : sizeof(s32); } @@ -597,7 +634,7 @@ static int uio_find_mem_index(struct vm_area_struct *vma) return -1; } -static int uio_vma_fault(struct vm_fault *vmf) +static vm_fault_t uio_vma_fault(struct vm_fault *vmf) { struct uio_device *idev = vmf->vma->vm_private_data; struct page *page; @@ -794,6 +831,13 @@ static void release_uio_class(void) uio_major_cleanup(); } +static void uio_device_release(struct device *dev) +{ + struct uio_device *idev = dev_get_drvdata(dev); + + kfree(idev); +} + /** * uio_register_device - register a new userspace IO device * @owner: module that creates the new device @@ -814,13 +858,14 @@ int __uio_register_device(struct module *owner, info->uio_dev = NULL; - idev = devm_kzalloc(parent, sizeof(*idev), GFP_KERNEL); + idev = kzalloc(sizeof(*idev), GFP_KERNEL); if (!idev) { return -ENOMEM; } idev->owner = owner; idev->info = info; + spin_lock_init(&idev->info_lock); init_waitqueue_head(&idev->wait); atomic_set(&idev->event, 0); @@ -828,14 +873,19 @@ int __uio_register_device(struct module *owner, if (ret) return ret; - idev->dev = device_create(&uio_class, parent, - MKDEV(uio_major, idev->minor), idev, - "uio%d", idev->minor); - if (IS_ERR(idev->dev)) { - printk(KERN_ERR "UIO: device register failed\n"); - ret = PTR_ERR(idev->dev); + idev->dev.devt = MKDEV(uio_major, idev->minor); + idev->dev.class = &uio_class; + idev->dev.parent = parent; + idev->dev.release = uio_device_release; + dev_set_drvdata(&idev->dev, idev); + + ret = dev_set_name(&idev->dev, "uio%d", idev->minor); + if (ret) + goto err_device_create; + + ret = device_register(&idev->dev); + if (ret) goto err_device_create; - } ret = uio_dev_add_attributes(idev); if (ret) @@ -863,7 +913,7 @@ int __uio_register_device(struct module *owner, err_request_irq: uio_dev_del_attributes(idev); err_uio_dev_add_attributes: - device_destroy(&uio_class, MKDEV(uio_major, idev->minor)); + device_unregister(&idev->dev); err_device_create: uio_free_minor(idev); return ret; @@ -878,6 +928,7 @@ EXPORT_SYMBOL_GPL(__uio_register_device); void uio_unregister_device(struct uio_info *info) { struct uio_device *idev; + unsigned long flags; if (!info || !info->uio_dev) return; @@ -891,7 +942,11 @@ void uio_unregister_device(struct uio_info *info) if (info->irq && info->irq != UIO_IRQ_CUSTOM) free_irq(info->irq, idev); - device_destroy(&uio_class, MKDEV(uio_major, idev->minor)); + spin_lock_irqsave(&idev->info_lock, flags); + idev->info = NULL; + spin_unlock_irqrestore(&idev->info_lock, flags); + + device_unregister(&idev->dev); return; } diff --git a/drivers/uio/uio_fsl_elbc_gpcm.c b/drivers/uio/uio_fsl_elbc_gpcm.c index b46323d9dc18..b55191335d90 100644 --- a/drivers/uio/uio_fsl_elbc_gpcm.c +++ b/drivers/uio/uio_fsl_elbc_gpcm.c @@ -475,7 +475,6 @@ MODULE_DEVICE_TABLE(of, uio_fsl_elbc_gpcm_match); static struct platform_driver uio_fsl_elbc_gpcm_driver = { .driver = { .name = "fsl,elbc-gpcm-uio", - .owner = THIS_MODULE, .of_match_table = uio_fsl_elbc_gpcm_match, }, .probe = uio_fsl_elbc_gpcm_probe, diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c index 398d22693234..6e2a9619192d 100644 --- a/drivers/virt/vboxguest/vboxguest_linux.c +++ 
b/drivers/virt/vboxguest/vboxguest_linux.c @@ -121,7 +121,9 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req, if (!buf) return -ENOMEM; - if (copy_from_user(buf, (void *)arg, hdr.size_in)) { + *((struct vbg_ioctl_hdr *)buf) = hdr; + if (copy_from_user(buf + sizeof(hdr), (void *)arg + sizeof(hdr), + hdr.size_in - sizeof(hdr))) { ret = -EFAULT; goto out; } diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c index 74f2e6e6202a..8851d441e5fd 100644 --- a/drivers/w1/masters/mxc_w1.c +++ b/drivers/w1/masters/mxc_w1.c @@ -112,6 +112,10 @@ static int mxc_w1_probe(struct platform_device *pdev) if (IS_ERR(mdev->clk)) return PTR_ERR(mdev->clk); + err = clk_prepare_enable(mdev->clk); + if (err) + return err; + clkrate = clk_get_rate(mdev->clk); if (clkrate < 10000000) dev_warn(&pdev->dev, @@ -125,12 +129,10 @@ static int mxc_w1_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); mdev->regs = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(mdev->regs)) - return PTR_ERR(mdev->regs); - - err = clk_prepare_enable(mdev->clk); - if (err) - return err; + if (IS_ERR(mdev->regs)) { + err = PTR_ERR(mdev->regs); + goto out_disable_clk; + } /* Software reset 1-Wire module */ writeb(MXC_W1_RESET_RST, mdev->regs + MXC_W1_RESET); @@ -146,8 +148,12 @@ static int mxc_w1_probe(struct platform_device *pdev) err = w1_add_master_device(&mdev->bus_master); if (err) - clk_disable_unprepare(mdev->clk); + goto out_disable_clk; + return 0; + +out_disable_clk: + clk_disable_unprepare(mdev->clk); return err; } diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c index 80a778b02f28..caef0e0fd817 100644 --- a/drivers/w1/w1.c +++ b/drivers/w1/w1.c @@ -751,7 +751,7 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn) /* slave modules need to be loaded in a context with unlocked mutex */ mutex_unlock(&dev->mutex); - request_module("w1-family-0x%02x", rn->family); + request_module("w1-family-0x%02X", rn->family); mutex_lock(&dev->mutex); spin_lock(&w1_flock); |